CHEN Yihui, 3 years ago
commit
1974eadfc2
1 changed file with 173 additions and 0 deletions
  1. infer.cc: 173 additions, 0 deletions
      infer.cc

+ 173 - 0
infer.cc

@@ -0,0 +1,173 @@
#include "include/context.h"
#include "include/errorcode.h"
#include "include/lite_session.h"
#include "include/model.h"
#include "include/version.h"

#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <limits>
#include <string>
#include <unordered_map>
#include <vector>

#include "imagenet_label.inc"
+
// Upper bound on the model file size read into memory: 64 MiB.
const int MAX_MODEL_SIZE = 64 * 1024 * 1024;

// printf-style path pattern of the preprocessed validation images;
// %05d is replaced by the zero-padded image index (see read_image).
const std::string IMAGE_FILE = "/data/val_data_c/%05d.bin";

// NOTE(review): file-scope using-namespace directives are tolerable in a
// standalone example like this one, but should never appear in a header.
using namespace mindspore;
using namespace mindspore::lite;
using namespace mindspore::session;
+
+void read_image(int idx, void *tensor_buf, size_t size)
+{
+  char image[128];
+  sprintf(image, IMAGE_FILE.c_str(), idx);
+  FILE *fp = fopen(image, "rb");
+  fread(tensor_buf, sizeof(char), size, fp);
+  fclose(fp);
+}
+
+void print_tensor(tensor::MSTensor *t)
+{
+  float *data_ptr = static_cast<float *>(t->MutableData());
+  for (int i = 0; i < t->ElementsNum(); ++i)
+  {
+    std::cout << data_ptr[i] << ", ";
+    if (i % 13 == 12)
+    {
+      std::cout << std::endl;
+    }
+  }
+}
+
+int arg_max(tensor::MSTensor *t)
+{
+  float *data_ptr = static_cast<float *>(t->MutableData());
+  float max_val = 0.f;
+  int max_idx = -1;
+  for (int i = 0; i < t->ElementsNum(); ++i)
+  {
+    if (data_ptr[i] > max_val)
+    {
+      max_idx = i;
+      max_val = data_ptr[i];
+    }
+  }
+  return max_idx;
+}
+
+int main(int argc, const char *argv[])
+{
+  if (argc != 3)
+  {
+    std::cout << "usage: ./classification your_model.ms image_num" << std::endl;
+    return -1;
+  }
+  std::string version = mindspore::lite::Version();
+  std::cout << "version: " << version << std::endl;
+
+  // load model
+  FILE *fp = fopen(argv[1], "rb");
+  char *model_buf = new char[MAX_MODEL_SIZE];
+  size_t model_size = fread(model_buf, sizeof(char), MAX_MODEL_SIZE, fp);
+  fclose(fp);
+  std::cout << "model: " << argv[1] << ", size: " << model_size << " Bytes" << std::endl;
+
+  Model *model = Model::Import(model_buf, model_size);
+
+  // create context
+  Context *context = new (std::nothrow) Context;
+  if (context == nullptr)
+  {
+    std::cerr << "New context failed while running %s", argv[1];
+    return RET_ERROR;
+  }
+  CpuDeviceInfo &cpu_decice_info = context->device_list_[0].device_info_.cpu_device_info_;
+  cpu_decice_info.cpu_bind_mode_ = HIGHER_CPU;
+  context->thread_num_ = 2;
+
+  // create session1
+  LiteSession *session = LiteSession::CreateSession(context);
+  delete (context);
+  if (session == nullptr)
+  {
+    std::cerr << "CreateSession failed while running %s", argv[1];
+    return RET_ERROR;
+  }
+
+  // compile graph
+
+  int ret = session->CompileGraph(model);
+  if (ret != RET_OK)
+  {
+    std::cerr << "CompileGraph failed" << std::endl;
+    // session and model need to be released by users manually.
+    delete (session);
+    delete (model);
+    return ret;
+  }
+  model->Free();
+
+  // alloc input mem
+  std::vector<tensor::MSTensor *> inputs = session->GetInputs();
+  tensor::MSTensor *input = inputs.front();
+  void *input_buf = input->MutableData();
+  std::cout << "input tenosr num: " << inputs.size() << std::endl;
+  std::cout << "input tensor[0] shape: ";
+  for (int i : input->shape())
+  {
+    std::cout << i << " ";
+  }
+  std::cout << std::endl;
+
+  // get output
+  std::unordered_map<std::string, tensor::MSTensor *> outputs = session->GetOutputs();
+  tensor::MSTensor *output = outputs.begin()->second;
+  std::cout << "output tenosr num: " << outputs.size() << std::endl;
+  std::cout << "output tensor[0] name: " << outputs.begin()->first << ", shape: ";
+  void *output_buf = output->MutableData();
+  for (int i : output->shape())
+  {
+    std::cout << i << " ";
+  }
+  std::cout << std::endl;
+  // infer
+  std::vector<int> result;
+  int IMAGE_NUM = std::atoi(argv[2]);
+
+  std::cout << "inference start" << std::endl;
+  for (size_t i = 0; i < IMAGE_NUM; i++)
+  {
+    read_image(i, input_buf, input->Size());
+    int ret = session->RunGraph();
+    if (ret != RET_OK)
+    {
+      std::cerr << "Run graph failed." << std::endl;
+      return RET_ERROR;
+    }
+    //print_tensor(output);
+    //std::cout << arg_max(output) << std::endl;
+    result.push_back(arg_max(output));
+    std::cout << "\r" << i * 100 / IMAGE_NUM << "%, ";
+    for (int j = 0; j < i * 80 / IMAGE_NUM; ++j)
+    {
+      std::cout << '*';
+    }
+  }
+  std::cout << std::endl;
+  std::cout << "inference finished" << std::endl;
+
+  int correct = 0;
+  for (int i = 0; i < IMAGE_NUM; ++i)
+  {
+    if (label[i] == result[i] - 1)
+    {
+      correct++;
+    }
+  }
+  std::cout << "top1 acc: " << correct / (float)IMAGE_NUM << std::endl;
+  return 0;
+}