```cpp
private:
    ncnn::Net landmark;
};
```
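Only the tail of the wrapper class is shown above. A minimal sketch of what the surrounding declaration could look like, assuming a hypothetical class name `NcnnMediapipeHand` and a `load()` method matching the loading code below (both names are assumptions, not the author's actual header):

```cpp
#include <android/asset_manager.h>
#include <net.h>  // ncnn

class NcnnMediapipeHand
{
public:
    // Loads "<modeltype>.param" / "<modeltype>.bin" from the APK assets.
    int load(AAssetManager* mgr, const char* modeltype, bool use_gpu);

private:
    ncnn::Net landmark;
};
```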
Model loading:
```cpp
char parampath[256];
char modelpath[256];
sprintf(parampath, "%s.param", modeltype);
sprintf(modelpath, "%s.bin", modeltype);

landmark.load_param(mgr, parampath);
landmark.load_model(mgr, modelpath);
```
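As a side note, `sprintf` into a fixed 256-byte buffer can overflow if `modeltype` is long; a bounds-checked sketch of the same path construction:

```cpp
char parampath[256];
char modelpath[256];
// snprintf never writes past the buffer; it truncates instead of overflowing.
snprintf(parampath, sizeof(parampath), "%s.param", modeltype);
snprintf(modelpath, sizeof(modelpath), "%s.bin", modeltype);
```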
Loading the model and querying its input/output blob names:
```cpp
bool MobileNetInference::load_model(AAssetManager* mgr, const std::string& param_path,
                                    const std::string& bin_path) {
    ncnn::set_cpu_powersave(2);

    // Options must be configured before load_param/load_model to take effect.
    net_.opt = ncnn::Option();
#if NCNN_VULKAN
    net_.opt.use_vulkan_compute = use_gpu_;
#endif
    net_.opt.num_threads = ncnn::get_big_cpu_count();

    // 1. Load the param file
    int ret = net_.load_param(mgr, param_path.c_str());
    if (ret != 0) {
        return false;
    }

    // 2. Load the model weights
    ret = net_.load_model(mgr, bin_path.c_str());
    if (ret != 0) {
        return false;
    }

    // Setting options here would be too late -- the net is already loaded.
    // net_.opt.use_vulkan_compute = use_gpu_;
    // net_.opt.num_threads = num_threads_;
    model_loaded_ = true;

    const std::vector<const char*>& input_names = net_.input_names();
    const std::vector<const char*>& output_names = net_.output_names();
    std::cout << "Input names: ";
    for (auto name : input_names) {
        std::cout << name << " ";
    }
    std::cout << std::endl;
    std::cout << "Output names: ";
    for (auto name : output_names) {
        std::cout << name << " ";
    }
    std::cout << std::endl;

    std::cout << "Model loaded successfully from:" << std::endl;
    std::cout << "  - " << param_path << std::endl;
    std::cout << "  - " << bin_path << std::endl;
    std::cout << "Using " << (use_gpu_ ? "GPU (Vulkan)" : "CPU")
              << " with " << num_threads_ << " threads." << std::endl;
    return true;
}
```
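The `inference()` and `get_top_k()` helpers used in the next section are not part of this excerpt. Below is a minimal sketch of what `inference()` could look like, assuming a 224x224 input, ImageNet mean/std normalization, the pnnx default blob names `in0`/`out0`, and a `last_inference_time_` member backing `get_last_inference_time()` (all of these are assumptions; match them to your `model_traced.ncnn.param` and training pipeline). Note also that `std::cout` output is not visible in logcat by default on Android; `__android_log_print`, used in the next section, is the reliable route.

```cpp
// Requires <chrono>, <vector>, OpenCV core headers, and ncnn's net.h.
std::vector<float> MobileNetInference::inference(const cv::Mat& bgr) {
    if (!model_loaded_ || bgr.empty())
        return {};

    auto t0 = std::chrono::steady_clock::now();

    // Resize and convert OpenCV's BGR layout to RGB (224x224 is an assumption).
    ncnn::Mat in = ncnn::Mat::from_pixels_resize(
            bgr.data, ncnn::Mat::PIXEL_BGR2RGB, bgr.cols, bgr.rows, 224, 224);

    // ImageNet mean/std normalization (assumption -- use your training values).
    const float mean_vals[3] = {0.485f * 255.f, 0.456f * 255.f, 0.406f * 255.f};
    const float norm_vals[3] = {1 / (0.229f * 255.f), 1 / (0.224f * 255.f), 1 / (0.225f * 255.f)};
    in.substract_mean_normalize(mean_vals, norm_vals);

    ncnn::Extractor ex = net_.create_extractor();
    ex.input("in0", in);      // pnnx default input blob name (assumption)

    ncnn::Mat out;
    ex.extract("out0", out);  // pnnx default output blob name (assumption)

    // Flatten the output blob into a plain score vector.
    std::vector<float> scores(out.w);
    for (int i = 0; i < out.w; i++)
        scores[i] = out[i];

    last_inference_time_ = std::chrono::duration_cast<std::chrono::milliseconds>(
            std::chrono::steady_clock::now() - t0).count();
    return scores;
}
```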
Classification inference (note that the test path below is wrapped in an `if (0)` guard, so it is compiled but never executed as written):
```cpp
JNIEXPORT jboolean JNICALL Java_com_example_classroom_NcnnMediapipeHand_loadModel(JNIEnv* env, jobject thiz, jobject assetManager, jint modelid, jint cpugpu)
{
    if (modelid < 0 || modelid > 6 || cpugpu < 0 || cpugpu > 1)
    {
        return JNI_FALSE;
    }

    AAssetManager *mgr = AAssetManager_fromJava(env, assetManager);

    if (0) {  // disabled test path
        __android_log_print(ANDROID_LOG_DEBUG, "ncnn", "loadModel %p", mgr);

        // Default parameters
        std::string model_param = "model_traced.ncnn.param";
        std::string model_bin = "model_traced.ncnn.bin";
        std::string image_path = "22.jpg";
        __android_log_print(ANDROID_LOG_ERROR, "ncnn", "image_path %s ", image_path.c_str());

        // 1. Create the inference wrapper
        MobileNetInference mobilenet;

        // 2. Configure parameters
        // mobilenet.set_use_gpu(true); // enable GPU acceleration
        mobilenet.set_num_threads(2);   // use 2 threads

        // 3. Load the model
        if (!mobilenet.load_model(mgr, model_param, model_bin)) {
            __android_log_print(ANDROID_LOG_ERROR, "ncnn", "Failed to load model: %s",
                                model_bin.c_str());
            return JNI_FALSE;
        }

        // 4. Load the test image from assets
        cv::Mat img = readImageFromAssets(mgr, image_path.c_str());
        // cv::Mat img = cv::imread(image_path);
        // cv::Mat img = cv::Mat::zeros(64, 64, CV_8UC3); // 64x64, 3 channels, all zeros (black)
        if (img.empty()) {
            __android_log_print(ANDROID_LOG_ERROR, "ncnn", "Failed to load image: %s",
                                image_path.c_str());
            return JNI_FALSE;
        }
        __android_log_print(ANDROID_LOG_ERROR, "ncnn", "img.cols %d, img.rows %d", img.cols,
                            img.rows);

        // 5. Run inference
        std::vector<float> scores = mobilenet.inference(img);
        if (scores.empty()) {
            __android_log_print(ANDROID_LOG_ERROR, "ncnn", "Inference failed");
            return JNI_FALSE;
        }
        __android_log_print(ANDROID_LOG_ERROR, "ncnn", "inference_time %ld ms scores.size:%zu",
                            mobilenet.get_last_inference_time(), scores.size());

        // 6. Get the top-k result (k = 1 here)
        auto top5 = MobileNetInference::get_top_k(scores, 1);

        // 7. Load labels and display the results
        std::vector<std::string> labels;
    }
    // ... (remainder of the function not shown in this excerpt)
```
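`get_top_k()` is likewise not shown in the excerpt. A minimal sketch using `std::partial_sort` over (index, score) pairs; the static signature is an assumption inferred from the `MobileNetInference::get_top_k(scores, 1)` call site:

```cpp
#include <algorithm>
#include <utility>
#include <vector>

// Returns the k highest scores paired with their class indices, best first.
// Assumed to be declared static in MobileNetInference.
std::vector<std::pair<int, float>> MobileNetInference::get_top_k(
        const std::vector<float>& scores, int k) {
    std::vector<std::pair<int, float>> indexed;
    indexed.reserve(scores.size());
    for (int i = 0; i < (int)scores.size(); i++)
        indexed.emplace_back(i, scores[i]);

    if (k > (int)indexed.size())
        k = (int)indexed.size();

    // Sort only the first k elements, by descending score.
    std::partial_sort(indexed.begin(), indexed.begin() + k, indexed.end(),
                      [](const std::pair<int, float>& a, const std::pair<int, float>& b) {
                          return a.second > b.second;
                      });
    indexed.resize(k);
    return indexed;
}
```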