JNI ncnn inference error:
Fatal signal 11 (SIGSEGV), code 1 (SEGV_MAPERR), fault addr
The inference call itself completes normally, so the crash happens at destruction time. The fix:
jni/cls_mobilenet.cpp
```cpp
// Includes reconstructed for context; header names follow ncnn's layout,
// and cls_mobilenet.h is assumed to declare the class (per the path above).
#include "cls_mobilenet.h"

#include <iostream>
#include <string>
#include <vector>

#include <android/asset_manager.h>

#include "net.h" // ncnn::Net, ncnn::Option
#include "cpu.h" // ncnn::set_cpu_powersave, ncnn::get_big_cpu_count
#if NCNN_VULKAN
#include "gpu.h" // ncnn::create_gpu_instance, ncnn::destroy_gpu_instance
#endif
MobileNetInference::~MobileNetInference() {
    net_.clear(); // <-- must clear() first, while the GPU instance is still alive
#if NCNN_VULKAN
    ncnn::destroy_gpu_instance();
#endif
}
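
// Why the order matters: with use_vulkan_compute enabled, the Net holds
// Vulkan pipelines and allocators owned by the global GPU instance. If
// destroy_gpu_instance() runs first, clear()/~Net() then releases those
// resources against a destroyed instance, which is the Fatal signal 11
// (SIGSEGV, SEGV_MAPERR) reported above.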
// Load the model
bool MobileNetInference::load_model(AAssetManager* mgr, const std::string& param_path,
                                    const std::string& bin_path) {
    ncnn::set_cpu_powersave(2); // 2 = bind worker threads to the big cores

    net_.opt = ncnn::Option();
#if NCNN_VULKAN
    if (use_gpu_) {
        // Create the GPU instance. Note: ideally this is created only once
        // globally; it is created inline here for the sake of the example.
        ncnn::create_gpu_instance();
        net_.opt.use_vulkan_compute = true;
    } else {
        net_.opt.use_vulkan_compute = false;
    }
#endif

    // Use the thread count you configured rather than get_big_cpu_count();
    // the original assigned num_threads_ and then immediately overwrote it
    // with get_big_cpu_count(), so only the intended assignment is kept.
    net_.opt.num_threads = num_threads_;
    // 1. Load the param file
    int ret = net_.load_param(mgr, param_path.c_str());
    if (ret != 0) {
        return false;
    }

    // 2. Load the model weights
    ret = net_.load_model(mgr, bin_path.c_str());
    if (ret != 0) {
        return false;
    }

    model_loaded_ = true;

    const std::vector<const char*>& input_names = net_.input_names();
    const std::vector<const char*>& output_names = net_.output_names();
std::cout << "Input names: ";
for (auto name : input_names) {
std::cout << name << " ";
}
std::cout << "Output names: ";
for (auto name : output_names) {
std::cout << name << " ";
}
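
    // Note: on Android, std::cout output normally does not show up in
    // logcat; routing these messages through __android_log_print
    // (android/log.h) makes them visible. Kept as std::cout here to
    // match the original code.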
std::cout << "Model loaded successfully from:" << std::endl;
std::cout << " - " << param_path << std::endl;
std::cout << " - " << bin_path << std::endl;
std::cout << "Using " << (use_gpu_ ? "GPU (Vulkan)" : "CPU")
<< " with " << num_threads_ << " threads." << std::endl;
return true;
}
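
For context, here is a minimal sketch of the JNI glue that drives the lifecycle above. Everything in it is an assumption for illustration: the Java class `com.example.demo.NcnnClassifier`, the asset file names, the no-argument constructor, and the global pointer. Only `MobileNetInference::load_model` and the destructor come from the code above.

```cpp
// Hypothetical JNI entry points; class/package names, asset paths, and the
// default constructor are assumptions, not part of the original project.
#include <jni.h>
#include <android/asset_manager_jni.h>
#include "cls_mobilenet.h"

static MobileNetInference* g_net = nullptr;

extern "C" JNIEXPORT jboolean JNICALL
Java_com_example_demo_NcnnClassifier_init(JNIEnv* env, jobject /*thiz*/,
                                          jobject asset_manager) {
    AAssetManager* mgr = AAssetManager_fromJava(env, asset_manager);
    if (g_net == nullptr) g_net = new MobileNetInference();
    return g_net->load_model(mgr, "mobilenet.param", "mobilenet.bin")
               ? JNI_TRUE : JNI_FALSE;
}

extern "C" JNIEXPORT void JNICALL
Java_com_example_demo_NcnnClassifier_release(JNIEnv*, jobject) {
    // delete runs ~MobileNetInference(), i.e. net_.clear() before
    // destroy_gpu_instance(): the ordering fix that removes the SIGSEGV.
    delete g_net;
    g_net = nullptr;
}
```

If you take the in-code comment's advice and create the GPU instance only once per process, `JNI_OnLoad`/`JNI_OnUnload` are natural places for `ncnn::create_gpu_instance()` / `ncnn::destroy_gpu_instance()`; in that case, drop the corresponding calls from `load_model()` and the destructor so the instance is not torn down twice.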