/* code */
//打开文件和指定的数据集
hsize_t i,j;
H5File file("../data/label_test/54000/54094.hdf5", H5F_ACC_RDONLY);
Group rg(file.getObjId("/"));
DataSet dset(rg.getObjId("/data/axuv"));
DataSet basic(rg.getObjId("/data/basic"));
//获取指定数据集的文件空间,方便后续获取其维度
DataSpace filespace = dset.getSpace();
//获取其维度
int rank = filespace.getSimpleExtentNdims();
//获取每一维度的大小,并用数组存储
hsize_t dims[rank];
rank = filespace.getSimpleExtentDims(dims);
DataSpace mspace1(rank, dims);
// 输出各维度的大小
for (int i = 0; i < rank; i++){
cout << "Dimension_" << i + 1 << " = " << dims[i] << endl;
}
// delete[]dims;
// dims = nullptr;
double data_out [dims[0]] [dims[1]];
std::vector<vector<float>> array(dims[0],vector<float>(dims[1]));
dset.read(data_out,PredType::NATIVE_DOUBLE, mspace1, filespace);
for (j = 0; j < dims[0]; j++) {
for (i = 0; i < dims[1]; i++)
array[j][i]=data_out[j][i];
}
cout << data_out[0][0] << endl;
filespace.close();
dset.close();
//获取指定数据集的文件空间,方便后续获取其维度
DataSpace filespace2 = basic.getSpace();
//获取其维度
int rank2 = filespace2.getSimpleExtentNdims();
//获取每一维度的大小,并用数组存储
hsize_t dims2[rank2];
rank2 = filespace2.getSimpleExtentDims(dims2);
DataSpace mspace2(rank2, dims2);
// 输出各维度的大小
for (int i = 0; i < rank2; i++){
cout << "Dimension_" << i + 1 << " = " << dims2[i] << endl;
}
// delete[]dims;
// dims = nullptr;
double data_basic [dims2[0]] [dims2[1]];
std::vector<vector<float>> array_basic(dims2[0],vector<float>(dims2[1]));
basic.read(data_basic,PredType::NATIVE_DOUBLE, mspace2, filespace2);
for (j = 0; j < dims2[0]; j++) {
for (i = 0; i < dims2[1]; i++)
array_basic[j][i]=data_basic[j][i];
}
cout << data_basic[0] << endl;
filespace.close();
basic.close();
rg.close();
file.close();
cout << endl << endl;
// Load the TensorFlow SavedModel and run inference on the two inputs.
// Tensor endpoint names come from the exported serving signature.
string model_path="./et1";
string input_tensor_name1="serving_default_input_1";
string input_tensor_name2="serving_default_input_2";
string output_tensor_name="StatefulPartitionedCall:0";
tensorflow::SessionOptions sess_options;
tensorflow::RunOptions run_options;
tensorflow::SavedModelBundle bundle;
Status status_create=LoadSavedModel(sess_options,run_options,model_path, {tensorflow::kSavedModelTagServe}, &bundle);
if(!status_create.ok()){
    // Diagnostics belong on stderr, not stdout (original used cout).
    cerr << "ERROR: Creating graph in session failed..." << status_create.ToString() << std::endl;
    return -1;
}
cout << "<----Successfully created session and load graph.------->"<< endl;
cout<<endl<<"<------------loading data-------------->"<<endl;
// Feed only the first row of each dataset.
// NOTE(review): shapes {1,16} and {1,9} must match the model's signature
// and the row lengths read above — TODO confirm against the exporter.
auto input_tensor1 = MakeTensor(array[0], {1, 16});
auto input_tensor2 = MakeTensor(array_basic[0], {1, 9});
cout << input_tensor1.DebugString()<<endl;
cout << input_tensor2.DebugString()<<endl;
std::vector<tensorflow::Tensor> out_tensors;
TF_CHECK_OK(bundle.GetSession()->Run({{input_tensor_name1, input_tensor1},{input_tensor_name2, input_tensor2}},
            {output_tensor_name}, {}, &out_tensors));
// Argmax over the output scores gives the predicted class id.
int class_id=GetLabelFromOutput(out_tensors);
cout << class_id << endl;
// Build a DT_FLOAT tensor of the given shape from a flat batch of values.
//   batch:     values copied in flat (row-major) order into the tensor.
//   batch_def: the desired tensor shape.
// NOTE(review): assumes batch.size() equals the product of batch_def's
// dimensions — TODO confirm at call sites.
Tensor MakeTensor(std::vector<float> const& batch, BatchDef const& batch_def) {
    Tensor t(DT_FLOAT,
            TensorShape(batch_def));
    // size_t index avoids the signed/unsigned comparison the original
    // `int i < batch.size()` produced.
    for (std::size_t i = 0; i < batch.size(); ++i) {
        t.flat<float>()(i) = batch[i];
    }
    return t;
}
// Argmax over the first output tensor's class scores.
//   outputs: tensors returned by Session::Run; outputs[0] is expected to
//            have shape [batch_size, num_classes].
// Returns the class index with the highest score for batch element 0,
// or -1 if the tensor has zero classes.
int GetLabelFromOutput(vector<tensorflow::Tensor> outputs)
{
    // Extract the first fetched tensor.
    Tensor t = outputs[0];
    auto tmap = t.tensor<float, 2>();           // [batch_size, num_classes]
    int output_dim = t.shape().dim_size(1);     // number of classes
    // BUG FIX: the original seeded output_prob with 0.0, so when every
    // score was negative it returned -1 (or tracked the wrong index).
    // Seed the argmax from the first element instead.
    int output_class_id = output_dim > 0 ? 0 : -1;
    double output_prob = output_dim > 0 ? static_cast<double>(tmap(0, 0)) : 0.0;
    for (int j = 1; j < output_dim; j++)
    {
        if (tmap(0, j) >= output_prob) {
            output_class_id = j;
            output_prob = tmap(0, j);
        }
    }
    return output_class_id;
}
// This article was published quickly using the limfx VS Code plugin.