C# ONNX E2Pose Human Keypoint Detection
Results
Model Information
Inputs
name: inputimg
tensor: Float[1, 3, 512, 512]
Outputs
name: kvxy/concat
tensor: Float[1, 341, 17, 3]
name: pv/concat
tensor: Float[1, 341, 1, 1]
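As used in the demo code below, kvxy/concat carries 341 pose proposals, each with 17 keypoints stored as (score, x, y), where x and y are normalized to the image size, and pv/concat carries one confidence value per proposal. The names and shapes can also be checked at runtime through the OnnxRuntime metadata API; a minimal sketch, assuming the same model path as the project below:
using Microsoft.ML.OnnxRuntime;
using System;
class MetadataDump
{
    static void Main()
    {
        // Open the session and print every input/output name with its dimensions
        using var session = new InferenceSession("model/e2epose_resnet50_1x3x512x512.onnx");
        foreach (var kv in session.InputMetadata)   // expected: inputimg -> [1, 3, 512, 512]
            Console.WriteLine($"input  {kv.Key}: [{string.Join(", ", kv.Value.Dimensions)}]");
        foreach (var kv in session.OutputMetadata)  // expected: kvxy/concat, pv/concat
            Console.WriteLine($"output {kv.Key}: [{string.Join(", ", kv.Value.Dimensions)}]");
    }
}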
Project
Code
using Microsoft.ML.OnnxRuntime;
using Microsoft.ML.OnnxRuntime.Tensors;
using OpenCvSharp;
using System;
using System.Collections.Generic;
using System.Drawing;
using System.Drawing.Imaging;
using System.Linq;
using System.Windows.Forms;
namespace Onnx_Demo
{
public partial class Form1 : Form
{
public Form1()
{
InitializeComponent();
}
string fileFilter = "*.*|*.bmp;*.jpg;*.jpeg;*.tif;*.tiff;*.png";
string image_path = "";
string startupPath;
DateTime dt1 = DateTime.Now;
DateTime dt2 = DateTime.Now;
string model_path;
Mat image;
Mat result_image;
SessionOptions options;
InferenceSession onnx_session;
Tensor<float> input_tensor;
List<NamedOnnxValue> input_container;
IDisposableReadOnlyCollection<DisposableNamedOnnxValue> result_infer;
DisposableNamedOnnxValue[] results_onnxvalue;
Tensor<float> result_tensors;
int inpHeight, inpWidth;
float confThreshold;
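// Skeleton edges, given as index pairs over the 17 keypoints (COCO keypoint order)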
int[] connect_list = { 0, 1, 0, 2, 1, 3, 2, 4, 3, 5, 4, 6, 5, 6, 5, 7, 7, 9, 6, 8, 8, 10, 5, 11, 6, 12, 11, 12, 11, 13, 13, 15, 12, 14, 14, 16 };
private void button1_Click(object sender, EventArgs e)
{
OpenFileDialog ofd = new OpenFileDialog();
ofd.Filter = fileFilter;
if (ofd.ShowDialog() != DialogResult.OK) return;
pictureBox1.Image = null;
image_path = ofd.FileName;
pictureBox1.Image = new Bitmap(image_path);
textBox1.Text = "";
image = new Mat(image_path);
pictureBox2.Image = null;
}
unsafe private void button2_Click(object sender, EventArgs e)
{
if (image_path == "")
{
return;
}
button2.Enabled = false;
pictureBox2.Image = null;
textBox1.Text = "";
Application.DoEvents();
// Read the image
image = new Mat(image_path);
// Convert BGR to RGB
Mat image_rgb = new Mat();
Cv2.CvtColor(image, image_rgb, ColorConversionCodes.BGR2RGB);
Cv2.Resize(image_rgb, image_rgb, new OpenCvSharp.Size(inpWidth, inpHeight));
// Input tensor
input_tensor = new DenseTensor<float>(new[] { 1, 3, inpHeight, inpWidth });
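// Fill the NCHW tensor with raw 0-255 RGB values (this demo applies no mean/std normalization)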
for (int y = 0; y < image_rgb.Height; y++)
{
for (int x = 0; x < image_rgb.Width; x++)
{
input_tensor[0, 0, y, x] = image_rgb.At<Vec3b>(y, x)[0];
input_tensor[0, 1, y, x] = image_rgb.At<Vec3b>(y, x)[1];
input_tensor[0, 2, y, x] = image_rgb.At<Vec3b>(y, x)[2];
}
}
// Clear any previous input, then add input_tensor to the input container under the model's input name
input_container.Clear();
input_container.Add(NamedOnnxValue.CreateFromTensor("inputimg", input_tensor));
dt1 = DateTime.Now;
// Run inference and fetch the results
result_infer = onnx_session.Run(input_container);
dt2 = DateTime.Now;
// Convert the outputs to a DisposableNamedOnnxValue array
results_onnxvalue = result_infer.ToArray();
float[] kpt = results_onnxvalue[0].AsTensor<float>().ToArray();
float[] pv = results_onnxvalue[1].AsTensor<float>().ToArray();
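// kpt layout: 341 proposals x 17 keypoints x (score, x, y); pv layout: one confidence per proposal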
float[] temp = new float[51];
int num_proposal = 341;
int num_pts = 17;
int len = num_pts * 3;
List<List<int>> results = new List<List<int>>();
for (int i = 0; i < num_proposal; i++)
{
Array.Copy(kpt, i * 51, temp, 0, 51);
if (pv[i] >= confThreshold)
{
List<int> human_pts = new List<int>();
for (int ii = 0; ii < num_pts * 2; ii++)
{
human_pts.Add(0);
}
for (int j = 0; j < num_pts; j++)
{
float score = temp[j * 3] * 2;
if (score >= confThreshold)
{
float x = temp[j * 3 + 1] * image.Cols;
float y = temp[j * 3 + 2] * image.Rows;
human_pts[j * 2] = (int)x;
human_pts[j * 2 + 1] = (int)y;
}
}
results.Add(human_pts);
}
}
result_image = image.Clone();
int start_x = 0;
int start_y = 0;
int end_x = 0;
int end_y = 0;
for (int i = 0; i < results.Count; ++i)
{
for (int j = 0; j < num_pts; j++)
{
int cx = results[i][j * 2];
int cy = results[i][j * 2 + 1];
if (cx > 0 && cy > 0)
{
Cv2.Circle(result_image, new OpenCvSharp.Point(cx, cy), 3, new Scalar(0, 0, 255), -1, LineTypes.AntiAlias);
}
start_x = results[i][connect_list[j * 2] * 2];
start_y = results[i][connect_list[j * 2] * 2 + 1];
end_x = results[i][connect_list[j * 2 + 1] * 2];
end_y = results[i][connect_list[j * 2 + 1] * 2 + 1];
if (start_x > 0 && start_y > 0 && end_x > 0 && end_y > 0)
{
Cv2.Line(result_image, new OpenCvSharp.Point(start_x, start_y), new OpenCvSharp.Point(end_x, end_y), new Scalar(0, 255, 0), 2, LineTypes.AntiAlias);
}
}
start_x = results[i][connect_list[num_pts * 2] * 2];
start_y = results[i][connect_list[num_pts * 2] * 2 + 1];
end_x = results[i][connect_list[num_pts * 2 + 1] * 2];
end_y = results[i][connect_list[num_pts * 2 + 1] * 2 + 1];
if (start_x > 0 && start_y > 0 && end_x > 0 && end_y > 0)
{
Cv2.Line(result_image, new OpenCvSharp.Point(start_x, start_y), new OpenCvSharp.Point(end_x, end_y), new Scalar(0, 255, 0), 2, LineTypes.AntiAlias);
}
}
result_infer.Dispose();// release the native output buffers now that the data has been copied out
pictureBox2.Image = new Bitmap(result_image.ToMemoryStream());
textBox1.Text = "Inference time: " + (dt2 - dt1).TotalMilliseconds + "ms";
button2.Enabled = true;
}
private void Form1_Load(object sender, EventArgs e)
{
startupPath = System.Windows.Forms.Application.StartupPath;
model_path = "model/e2epose_resnet50_1x3x512x512.onnx";
// Create the session options (controls logging and the execution provider)
options = new SessionOptions();
options.LogSeverityLevel = OrtLoggingLevel.ORT_LOGGING_LEVEL_INFO;
options.AppendExecutionProvider_CPU(0);// run on the CPU
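// To run on a GPU instead, the Microsoft.ML.OnnxRuntime.Gpu package provides a CUDA provider
// (not used in this demo; requires a matching CUDA/cuDNN install):
// options.AppendExecutionProvider_CUDA(0);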
// Create the inference session and load the local model file
onnx_session = new InferenceSession(model_path, options);// model_path is the path to the ONNX model file
// Create the input container
input_container = new List<NamedOnnxValue>();
image_path = "test_img/1.jpg";
pictureBox1.Image = new Bitmap(image_path);
image = new Mat(image_path);
inpWidth = 512;
inpHeight = 512;
confThreshold = 0.5f;
}
private void pictureBox1_DoubleClick(object sender, EventArgs e)
{
Common.ShowNormalImg(pictureBox1.Image);
}
private void pictureBox2_DoubleClick(object sender, EventArgs e)
{
Common.ShowNormalImg(pictureBox2.Image);
}
SaveFileDialog sdf = new SaveFileDialog();
private void button3_Click(object sender, EventArgs e)
{
if (pictureBox2.Image == null)
{
return;
}
Bitmap output = new Bitmap(pictureBox2.Image);
sdf.Title = "Save";
sdf.Filter = "Images (*.jpg)|*.jpg|Images (*.png)|*.png|Images (*.bmp)|*.bmp";
if (sdf.ShowDialog() == DialogResult.OK)
{
switch (sdf.FilterIndex)
{
case 1:
{
output.Save(sdf.FileName, ImageFormat.Jpeg);
break;
}
case 2:
{
output.Save(sdf.FileName, ImageFormat.Png);
break;
}
case 3:
{
output.Save(sdf.FileName, ImageFormat.Bmp);
break;
}
}
MessageBox.Show("Saved successfully to: " + sdf.FileName);
}
}
}
}
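If the annotated image should also be written to disk without going through System.Drawing, OpenCvSharp can save the Mat directly; a minimal sketch (the path "result.jpg" is just an example):
Cv2.ImWrite("result.jpg", result_image); // returns false if the write fails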