// (removed: stray web-page banner text "代码拉取完成,页面将自动刷新" that was not part of the source and breaks compilation)
//////////////////////////////////////////////////////////////////////////////
//名称:GOMfcTemplate2
//功能:opencv+mfc 框架
//作者:jsxyhelu(1755311380@qq.com http://jsxyhelu.cnblogs.com)
//组织:GREENOPEN
//日期:2016-10-30
/////////////////////////////////////////////////////////////////////////////
// GOMfcTemplate2Dlg.cpp : 实现文件
//
#include "stdafx.h"
#include "GOMfcTemplate2.h"
#include "GOMfcTemplate2Dlg.h"
#include "afxdialogex.h"
#include <algorithm>
#include <fstream>
#include <iomanip>
#include <vector>
#include <string>
#include <chrono>
#include <memory>
#include <utility>
#include <inference_engine.hpp>
#include <ext_list.hpp>
#include <samples/slog.hpp>
//#include <samples/ocv_common.hpp> //OCV_COMMON加上后会出现较大的混乱
using namespace InferenceEngine;
using namespace std;
using namespace cv;
#ifdef _DEBUG
#define new DEBUG_NEW
#endif
// Global inference request; assigned and used by IEInfer() below.
// NOTE(review): shared mutable state with no synchronization — confirm it is
// only ever touched from one thread at a time.
InferenceEngine::InferRequest infer_request;
/**
* @brief Copies image data from a cv::Mat into a given Blob, converting each
*        pixel value to T and rearranging from interleaved HWC into the
*        planar CHW layout the network expects.
* @param orig_image - source cv::Mat; assumed 8-bit 3-channel (Vec3b access
*        below), resized when its size differs from the blob's spatial dims.
* @param blob - destination Blob with NCHW dimensions.
* @param batchIndex - index of this image inside the blob's batch dimension.
*/
template <typename T>
void matU8ToBlob(const cv::Mat& orig_image, InferenceEngine::Blob::Ptr& blob, int batchIndex = 0) {
	InferenceEngine::SizeVector blobSize = blob->getTensorDesc().getDims();
	const size_t width = blobSize[3];
	const size_t height = blobSize[2];
	const size_t channels = blobSize[1];
	T* blob_data = blob->buffer().as<T*>();
	cv::Mat resized_image(orig_image); // shares data when no resize is needed
	if (static_cast<int>(width) != orig_image.size().width ||
		static_cast<int>(height) != orig_image.size().height) {
		cv::resize(orig_image, resized_image, cv::Size(static_cast<int>(width), static_cast<int>(height)));
	}
	// fix: size_t (was int) so a large batch index / tensor cannot overflow.
	const size_t batchOffset = static_cast<size_t>(batchIndex) * width * height * channels;
	for (size_t c = 0; c < channels; c++) {
		for (size_t h = 0; h < height; h++) {
			for (size_t w = 0; w < width; w++) {
				// Vec3b access: the source must be an 8UC3 image.
				blob_data[batchOffset + c * width * height + h * width + w] =
					resized_image.at<cv::Vec3b>(static_cast<int>(h), static_cast<int>(w))[c];
			}
		}
	}
}
// Camera display loop. All capture control happens by the UI thread setting
// flags on the dialog (b_closeCam, bMethod); the actual grabbing runs here.
DWORD WINAPI CaptureThread(LPVOID lpParameter)
{
	CGOMfcTemplate2Dlg* pDlg = (CGOMfcTemplate2Dlg*)lpParameter;
	double t_start = (double)cv::getTickCount(); // start of the current frame interval
	// fix (hoisted out of the loop): the network does not change per frame,
	// so the output name and the infer request can be created once instead of
	// being rebuilt on every iteration.
	std::string firstOutputName = pDlg->IENetSetup(pDlg->network);
	InferRequest infer_request = pDlg->executableNetwork.CreateInferRequest();
	while (true)
	{
		if (pDlg->b_closeCam) // UI asked us to stop
			break;
		// Throttle to roughly 10 fps.
		double t = ((double)cv::getTickCount() - t_start) / getTickFrequency();
		if (t <= 0.1)
		{
			Sleep(100);
			continue;
		}
		else
		{
			t_start = (double)cv::getTickCount();
		}
		// Grab the current frame from DirectShow.
		IplImage* queryframe = pDlg->cameraDs.QueryFrame();
		if (queryframe == NULL)
			continue; // fix: skip a frame the driver failed to deliver instead of crashing
		// In OpenCV 3.x an IplImage must be converted with cvarrToMat (a plain cast worked in 2.x).
		Mat camframe = cvarrToMat(queryframe);
		pDlg->showImage(camframe, IDC_CAM); // show the raw camera image
		Mat dst;
		Mat img;
		cvtColor(camframe, img, COLOR_BGR2GRAY);
		cvtColor(img, img, COLOR_GRAY2BGR); // 3-channel grayscale, as matU8ToBlob expects
		if (pDlg->bMethod) // run the network on the frame (gray-to-color demo path)
		{
			if (img.empty())
				break; // fix: was "return -1", which wraps in an unsigned DWORD
			Blob::Ptr lrInputBlob = infer_request.GetBlob("data");
			matU8ToBlob<float_t>(img, lrInputBlob, 0); // HWC u8 -> planar CHW float; 3rd arg is the batch index
			infer_request.Infer(); // one inference per frame
			// Reassemble the planar FP32 output planes into a displayable image.
			const Blob::Ptr outputBlob = infer_request.GetBlob(firstOutputName);
			const auto outputData = outputBlob->buffer().as<PrecisionTrait<Precision::FP32>::value_type*>();
			int h = outputBlob->getTensorDesc().getDims()[2];
			int w = outputBlob->getTensorDesc().getDims()[3];
			size_t nunOfPixels = (size_t)w * h;
			std::vector<cv::Mat> imgPlanes{ cv::Mat(h, w, CV_32FC1, &(outputData[0])),
				cv::Mat(h, w, CV_32FC1, &(outputData[nunOfPixels])),
				cv::Mat(h, w, CV_32FC1, &(outputData[nunOfPixels * 2])) };
			for (auto & plane : imgPlanes)
				plane.convertTo(plane, CV_8UC1, 255); // scale each float plane to 8 bit
			cv::merge(imgPlanes, dst);
		}
		else
		{
			dst = img.clone(); // pass-through when the algorithm is toggled off
		}
		pDlg->showImage(dst, IDC_PIC); // show the processed image
	}
	return 0;
}
// CAboutDlg dialog used for the application's "About" menu item.
class CAboutDlg : public CDialogEx
{
public:
CAboutDlg();
// Dialog data
enum { IDD = IDD_ABOUTBOX };
protected:
virtual void DoDataExchange(CDataExchange* pDX); // DDX/DDV support
// Implementation
protected:
DECLARE_MESSAGE_MAP()
};
// Construct the About box with its dialog-template resource id.
CAboutDlg::CAboutDlg() : CDialogEx(CAboutDlg::IDD)
{
}
// Standard DDX/DDV hook; the About box has no controls to exchange.
void CAboutDlg::DoDataExchange(CDataExchange* pDX)
{
CDialogEx::DoDataExchange(pDX);
}
// The About box handles no messages beyond the CDialogEx defaults.
BEGIN_MESSAGE_MAP(CAboutDlg, CDialogEx)
END_MESSAGE_MAP()
// CGOMfcTemplate2Dlg dialog
// Main dialog constructor: camera starts closed, no snapshot pending.
CGOMfcTemplate2Dlg::CGOMfcTemplate2Dlg(CWnd* pParent /*=NULL*/)
: CDialogEx(CGOMfcTemplate2Dlg::IDD, pParent)
, m_nCamCount(0)
, m_iCamNum(0)
, b_takeApic(false)
, b_closeCam(false)
{
m_hIcon = AfxGetApp()->LoadIcon(IDR_MAINFRAME);
}
// Wire the camera combo box and the preview static control to members.
void CGOMfcTemplate2Dlg::DoDataExchange(CDataExchange* pDX)
{
CDialogEx::DoDataExchange(pDX);
DDX_Control(pDX, IDC_COMBO_CAM, m_CBNCamList);
DDX_Control(pDX, IDC_CAM, m_cam);
}
// Route button clicks, combo selection and window messages to the handlers below.
BEGIN_MESSAGE_MAP(CGOMfcTemplate2Dlg, CDialogEx)
ON_WM_SYSCOMMAND()
ON_WM_PAINT()
ON_WM_QUERYDRAGICON()
ON_BN_CLICKED(IDC_BTN_OPENCAM, &CGOMfcTemplate2Dlg::OnBnClickedBtnOpencam)
ON_CBN_SELCHANGE(IDC_COMBO_CAM, &CGOMfcTemplate2Dlg::OnCbnSelchangeComboCam)
ON_BN_CLICKED(IDC_BTN_TAKEPIC, &CGOMfcTemplate2Dlg::OnBnClickedBtnTakepic)
ON_BN_CLICKED(IDC_BTN_CONFIG, &CGOMfcTemplate2Dlg::OnBnClickedBtnConfig)
ON_BN_CLICKED(IDC_BTN_CLOSECAM, &CGOMfcTemplate2Dlg::OnBnClickedBtnClosecam)
ON_WM_DESTROY()
ON_BN_CLICKED(IDC_BTN_RATIO, &CGOMfcTemplate2Dlg::OnBnClickedBtnRatio)
ON_BN_CLICKED(IDC_BUTTON_WRITE, &CGOMfcTemplate2Dlg::OnBnClickedButtonWrite)
ON_BN_CLICKED(IDC_BUTTON_READ, &CGOMfcTemplate2Dlg::OnBnClickedButtonRead)
ON_BN_CLICKED(IDC_BTN_OPENIMAGE, &CGOMfcTemplate2Dlg::OnBnClickedBtnOpenimage)
END_MESSAGE_MAP()
// CGOMfcTemplate2Dlg message handlers
// Dialog initialization: system menu, icons, camera enumeration, display
// buffers, and one-time OpenVINO model load.
BOOL CGOMfcTemplate2Dlg::OnInitDialog()
{
	CDialogEx::OnInitDialog();
	// Add the "About..." menu item to the system menu.
	// IDM_ABOUTBOX must be in the system command range.
	ASSERT((IDM_ABOUTBOX & 0xFFF0) == IDM_ABOUTBOX);
	ASSERT(IDM_ABOUTBOX < 0xF000);
	CMenu* pSysMenu = GetSystemMenu(FALSE);
	if (pSysMenu != NULL)
	{
		BOOL bNameValid;
		CString strAboutMenu;
		bNameValid = strAboutMenu.LoadString(IDS_ABOUTBOX);
		ASSERT(bNameValid);
		if (!strAboutMenu.IsEmpty())
		{
			pSysMenu->AppendMenu(MF_SEPARATOR);
			pSysMenu->AppendMenu(MF_STRING, IDM_ABOUTBOX, strAboutMenu);
		}
	}
	// Set the dialog icon. The framework does this automatically when the
	// application's main window is not a dialog.
	SetIcon(m_hIcon, TRUE);  // large icon
	SetIcon(m_hIcon, FALSE); // small icon
	// Enumerate the attached cameras and fill the combo box.
	m_nCamCount = CCameraDS::CameraCount();
	char camera_name[1024];
	char istr[25];
	for (int i = 0; i < m_nCamCount; i++)
	{
		int retval = CCameraDS::CameraName(i, camera_name, sizeof(camera_name));
		if (retval > 0)
		{
			// fix: only touch camera_name after the query succeeded; on
			// failure the buffer holds indeterminate bytes.
			sprintf_s(istr, " # %d", i); // append the device index to the name
			strcat_s(camera_name, istr);
			CString camstr(camera_name);
			m_CBNCamList.AddString(camstr);
		}
		else
		{
			AfxMessageBox(_T("不能获取摄像头的名称"));
		}
	}
	// Size the two image buffers to their picture controls.
	CRect rect;
	GetDlgItem(IDC_CAM)->GetClientRect(&rect);
	m_mainframe = Mat::zeros(rect.Height(), rect.Width(), CV_8UC3);
	GetDlgItem(IDC_PIC)->GetClientRect(&rect);
	m_takepic = Mat::zeros(rect.Height(), rect.Width(), CV_8UC3);
	bMethod = false;
	// One-time OpenVINO setup: read the IR model, create the CPU plugin and
	// compile the executable network.
	// NOTE(review): hard-coded absolute model paths — consider making these configurable.
	network = IENetWork("E:/OpenVINO_modelZoo/road-segmentation-adas-0001.xml", "E:/OpenVINO_modelZoo/road-segmentation-adas-0001.bin");
	plugin = IEplugin(network);
	executableNetwork = getNetWork(plugin, network);
	return TRUE; // return TRUE unless you set the focus to a control
}
// System-command handler: intercept the About item we appended to the system
// menu; everything else goes to the base class.
void CGOMfcTemplate2Dlg::OnSysCommand(UINT nID, LPARAM lParam)
{
	// The low four bits of a system-command id are used internally by
	// Windows, so mask them off before comparing.
	const UINT command = nID & 0xFFF0;
	if (command == IDM_ABOUTBOX)
	{
		CAboutDlg aboutDlg;
		aboutDlg.DoModal();
		return;
	}
	CDialogEx::OnSysCommand(nID, lParam);
}
// 如果向对话框添加最小化按钮,则需要下面的代码
// 来绘制该图标。对于使用文档/视图模型的 MFC 应用程序,
// 这将由框架自动完成。
void CGOMfcTemplate2Dlg::OnPaint()
{
if (IsIconic())
{
CPaintDC dc(this); // 用于绘制的设备上下文
SendMessage(WM_ICONERASEBKGND, reinterpret_cast<WPARAM>(dc.GetSafeHdc()), 0);
// 使图标在工作区矩形中居中
int cxIcon = GetSystemMetrics(SM_CXICON);
int cyIcon = GetSystemMetrics(SM_CYICON);
CRect rect;
GetClientRect(&rect);
int x = (rect.Width() - cxIcon + 1) / 2;
int y = (rect.Height() - cyIcon + 1) / 2;
// 绘制图标
dc.DrawIcon(x, y, m_hIcon);
}
else
{
//绘制图片显示区域
CDialogEx::OnPaint();
}
}
// The system calls this function to obtain the cursor to display while the
// user drags the minimized window.
HCURSOR CGOMfcTemplate2Dlg::OnQueryDragIcon()
{
return static_cast<HCURSOR>(m_hIcon);
}
void CGOMfcTemplate2Dlg::OnBnClickedBtnOpencam()
{
if (m_nCamCount>=1)//开视频捕获线程
{
HANDLE hThread = NULL;
DWORD dwThreadID = 0;
OnBnClickedBtnClosecam();//首先关闭现有摄像头
bool bret = cameraDs.OpenCamera(m_iCamNum,false,640,480); //尝试打开摄像头
if (bret)
{
b_closeCam = false;
hThread = CreateThread(NULL, 0, CaptureThread, this, 0, &dwThreadID);
}
}
else
{
AfxMessageBox(_T("请确认至少有摄像头连上了"));
}
}
// Remember which camera the user picked in the combo box.
void CGOMfcTemplate2Dlg::OnCbnSelchangeComboCam()
{
m_iCamNum = m_CBNCamList.GetCurSel();
}
// Populate a BITMAPINFO header (plus a grayscale palette for 8 bpp images)
// so a raw pixel buffer can be handed to GDI.
void FillBitmapInfo(BITMAPINFO* bmi, int width, int height, int bpp, int origin)
{
	assert(bmi && width >= 0 && height >= 0 && (bpp == 8 || bpp == 24 || bpp == 32));
	BITMAPINFOHEADER* header = &bmi->bmiHeader;
	memset(header, 0, sizeof(*header));
	header->biSize = sizeof(BITMAPINFOHEADER);
	header->biWidth = width;
	// Positive height = bottom-up DIB, negative = top-down.
	header->biHeight = origin ? abs(height) : -abs(height);
	header->biPlanes = 1;
	header->biBitCount = (unsigned short)bpp;
	header->biCompression = BI_RGB;
	if (bpp == 8)
	{
		// Identity grayscale palette: entry i maps to gray level i.
		for (int i = 0; i < 256; i++)
		{
			RGBQUAD& entry = bmi->bmiColors[i];
			entry.rgbBlue = entry.rgbGreen = entry.rgbRed = (BYTE)i;
			entry.rgbReserved = 0;
		}
	}
}
// Render a cv::Mat into the picture control identified by ID.
void CGOMfcTemplate2Dlg::showImage(Mat _src, UINT ID)
{
	if (_src.empty())
		return;
	Mat src;
	// Resize to a square whose side is a multiple of 100.
	// NOTE(review): presumably this keeps the 24bpp DIB rows DWORD-aligned;
	// it also distorts the aspect ratio — confirm that is intended.
	int imax = std::max(_src.cols, _src.rows);
	imax = (1 + imax / 100) * 100;
	resize(_src, src, Size(imax, imax));
	// The GDI path below expects a 3-channel image.
	if (src.channels() == 1)
	{
		cvtColor(src, src, COLOR_GRAY2BGR);
	}
	CRect rect;
	GetDlgItem(ID)->GetClientRect(&rect); // where to display
	CDC* pDC = GetDlgItem(ID)->GetDC();
	HDC hDC = pDC->GetSafeHdc(); // raw device handle of the target control
	BITMAPINFO bmi = { 0 }; // describe the pixel buffer to GDI
	bmi.bmiHeader.biSize = sizeof(bmi.bmiHeader);
	bmi.bmiHeader.biCompression = BI_RGB;
	bmi.bmiHeader.biWidth = src.cols;
	bmi.bmiHeader.biHeight = src.rows * -1; // negative height = top-down DIB
	bmi.bmiHeader.biPlanes = 1;
	bmi.bmiHeader.biBitCount = 24;
	m_bitmapBits = new RGBTRIPLE[src.cols * src.rows];
	Mat cv_bitmapBits(Size(src.cols, src.rows), CV_8UC3, m_bitmapBits);
	src.copyTo(cv_bitmapBits); // copy pixels into the GDI buffer
	// HALFTONE gives better quality when shrinking; COLORONCOLOR is faster.
	if (src.cols > rect.Width())
	{
		SetStretchBltMode(hDC, HALFTONE);
	}
	else
	{
		SetStretchBltMode(hDC, COLORONCOLOR);
	}
	::StretchDIBits(hDC, 0, 0, rect.Width(), rect.Height(), 0, 0, src.cols, src.rows, m_bitmapBits, &bmi, DIB_RGB_COLORS, SRCCOPY);
	ReleaseDC(pDC);
	// fix: this buffer was re-allocated on every call and never freed — a
	// per-frame memory leak. Free it once GDI is done with it.
	delete[] m_bitmapBits;
	m_bitmapBits = NULL;
}
// Snapshot button: raise the flag; the capture thread performs the grab.
void CGOMfcTemplate2Dlg::OnBnClickedBtnTakepic()
{
b_takeApic = true;
Sleep(100); // wait for the capture thread to pick the flag up -- jsxyhelu
//imwrite("takeApic.jpg", camframe);
}
// Show the DirectShow filter (driver) property page for the open camera.
void CGOMfcTemplate2Dlg::OnBnClickedBtnConfig()
{
cameraDs.DisplayFilterProperties();
}
// Try to close the camera: signal the capture thread, then release the device.
void CGOMfcTemplate2Dlg::OnBnClickedBtnClosecam()
{
// Ask the capture loop to exit.
b_closeCam = true;
// NOTE(review): a fixed 100 ms sleep stands in for joining the thread — a
// slow loop iteration could still be using the camera when it is closed.
Sleep(100);
cameraDs.CloseCamera();
}
// Window teardown: stop the capture thread and camera on the way out.
void CGOMfcTemplate2Dlg::OnDestroy()
{
CDialogEx::OnDestroy();
OnBnClickedBtnClosecam();// close all cameras and threads before the window is destroyed
}
void CGOMfcTemplate2Dlg::OnBnClickedBtnRatio()
{
OnBnClickedBtnClosecam();
if (m_nCamCount>=1)//开视频捕获线程
{
HANDLE hThread = NULL;
DWORD dwThreadID = 0;
OnBnClickedBtnClosecam();//首先关闭现有摄像头
bool bret = cameraDs.OpenCamera(m_iCamNum,true,640,480); //尝试打开摄像头
if (bret)
{
b_closeCam = false;
hThread = CreateThread(NULL, 0, CaptureThread, this, 0, &dwThreadID);
}
}
else
{
AfxMessageBox(_T("请确认至少有摄像头连上了"));
}
}
// "Write" button: while the camera is open this only toggles bMethod (the
// capture thread applies it); with the camera closed it runs one inference on
// the still image held in m_mainframe and shows the result.
void CGOMfcTemplate2Dlg::OnBnClickedButtonWrite()
{
if (b_closeCam == false) // camera is currently open
{
bMethod = !bMethod;
}
else
{
// Run the algorithm on the loaded still image.
if (m_mainframe.empty())
{
return;
}
std::string firstOutputName = IENetSetup(network);
InferRequest infer_request = executableNetwork.CreateInferRequest();
Blob::Ptr lrInputBlob = infer_request.GetBlob("data");
matU8ToBlob<float_t>(m_mainframe, lrInputBlob, 0);// HWC u8 -> planar CHW float; 3rd argument is the batch index
// --------------------------- run inference -------------------------------------------------
infer_request.Infer();// one Infer() call per image
// --------------------------- process the result --------------------------------------------
const Blob::Ptr outputBlob = infer_request.GetBlob(firstOutputName);
const auto outputData = outputBlob->buffer().as<PrecisionTrait<Precision::FP32>::value_type*>();
size_t numOfImages = outputBlob->getTensorDesc().getDims()[0];  // NOTE(review): unused
size_t numOfChannels = outputBlob->getTensorDesc().getDims()[1];// NOTE(review): unused
int h = outputBlob->getTensorDesc().getDims()[2];
int w = outputBlob->getTensorDesc().getDims()[3];
size_t nunOfPixels = w * h; // the planar result in memory must be reassembled
// Wrap the three output planes without copying, then convert to 8-bit.
std::vector<cv::Mat> imgPlanes{ cv::Mat(h, w, CV_32FC1, &(outputData[0])),
cv::Mat(h, w, CV_32FC1, &(outputData[nunOfPixels])),
cv::Mat(h, w, CV_32FC1, &(outputData[nunOfPixels * 2])) };
for (auto & img : imgPlanes) // scale each plane (x255) to 8 bit
img.convertTo(img, CV_8UC1, 255);
cv::Mat resultImg;
cv::merge(imgPlanes, resultImg);
showImage(resultImg, IDC_PIC); // show the processed image
}
}
// "Read" button: let the user pick a YAML file and read three demo strings
// from it via cv::FileStorage.
void CGOMfcTemplate2Dlg::OnBnClickedButtonRead()
{
	CString szFilters = _T("*(*.*)|*.*|yml(*.yml)|*.yml||");
	CFileDialog dlg(TRUE, NULL, NULL, 0, szFilters, this);
	// fix: previously fell through with an empty path when the user cancelled.
	if (dlg.DoModal() != IDOK)
		return;
	CString FilePathName = dlg.GetPathName();
	string filename(FilePathName);
	FileStorage fs(filename, FileStorage::READ);
	string s1, s2, s3;
	if (fs.isOpened())
	{
		fs["str1"] >> s1;
		fs["str2"] >> s2;
		fs["str3"] >> s3;
	}
	fs.release();
}
// Open a still image from disk and show it in the camera pane.
void CGOMfcTemplate2Dlg::OnBnClickedBtnOpenimage()
{
// Close the camera first so the capture thread stops drawing.
OnBnClickedBtnClosecam();
//TODO open the picture window
CFileDialog dlg(TRUE, NULL, NULL, 0, "jpg Files (*.jpg)|*.jpg|png Files (*.png)|*.png|bmp Files (*.bmp)|*.bmp|all Files (*.*)|*.*||", this);
if (dlg.DoModal() == IDOK)
{
string FilePathName = dlg.GetPathName();
m_mainframe = imread(FilePathName); // NOTE(review): empty Mat on failure; showImage tolerates that
showImage(m_mainframe, IDC_CAM); // show the original image
UpdateData(FALSE);
}
}
// One-shot inference helper: builds the plugin, loads the model, pushes one
// frame through the network and returns the merged result image.
// NOTE(review): everything (plugin, model, executable network) is rebuilt on
// every call, and the relative model paths differ from the absolute ones in
// OnInitDialog — this looks like an earlier, self-contained version of the
// pipeline kept for reference. It also writes the global infer_request.
Mat CGOMfcTemplate2Dlg::IEInfer(Mat m_mainframe)
{
// Initialize the Inference Engine.
// --------------------------- 1. prepare the IE plugin -------------------------------------
InferencePlugin plugin(PluginDispatcher().getSuitablePlugin(TargetDevice::eCPU));
plugin.AddExtension(std::make_shared<Extensions::Cpu::CpuExtensions>());// CPU extensions layer pack
// --------------------------- 2. read the IR model (xml + bin) ---------------------------------
CNNNetReader networkReader;
networkReader.ReadNetwork("./road-segmentation-adas-0001.xml");
networkReader.ReadWeights("./road-segmentation-adas-0001.bin");
CNNNetwork network = networkReader.getNetwork();
// --------------------------- 3. prepare inputs and outputs ------------------------------------------
InputsDataMap inputInfo(network.getInputsInfo());// input descriptors
if (inputInfo.size() != 1) throw std::logic_error("错误,该模型应该为单输入");
auto lrInputInfoItem = inputInfo["data"]; // the single input layer
int w = static_cast<int>(lrInputInfoItem->getTensorDesc().getDims()[3]); // input size required by the model
int h = static_cast<int>(lrInputInfoItem->getTensorDesc().getDims()[2]);
network.setBatchSize(1);// a single image, so batch size 1
// Prepare the output descriptors.
OutputsDataMap outputInfo(network.getOutputsInfo());// output descriptors
std::string firstOutputName;
for (auto &item : outputInfo) {
if (firstOutputName.empty()) {
firstOutputName = item.first;
}
DataPtr outputData = item.second;
if (!outputData) {
throw std::logic_error("错误的格式,请检查!");
}
item.second->setPrecision(Precision::FP32);
}
// --------------------------- 4. load the model ------------------------------------------ (by far the slowest step)
ExecutableNetwork executableNetwork = plugin.LoadNetwork(network, {});
// --------------------------- 5. create the infer request -------------------------------------------------
infer_request = executableNetwork.CreateInferRequest();
// --------------------------- 6. feed the input data -------------------------------------------------
Blob::Ptr lrInputBlob = infer_request.GetBlob("data"); // NOTE(review): the input name "data" is assumed, not queried
matU8ToBlob<float_t>(m_mainframe, lrInputBlob, 0);// HWC u8 -> planar CHW float; 3rd argument is the batch index
// --------------------------- 7. run inference -------------------------------------------------
infer_request.Infer();// one Infer() call per image
// --------------------------- 8. process the result -------------------------------------------------------
const Blob::Ptr outputBlob = infer_request.GetBlob(firstOutputName);
const auto outputData = outputBlob->buffer().as<PrecisionTrait<Precision::FP32>::value_type*>();
size_t numOfImages = outputBlob->getTensorDesc().getDims()[0];  // NOTE(review): unused
size_t numOfChannels = outputBlob->getTensorDesc().getDims()[1];// NOTE(review): unused
h = outputBlob->getTensorDesc().getDims()[2];
w = outputBlob->getTensorDesc().getDims()[3];
size_t nunOfPixels = w * h; // the planar result in memory must be reassembled
std::vector<cv::Mat> imgPlanes{ cv::Mat(h, w, CV_32FC1, &(outputData[0])),
cv::Mat(h, w, CV_32FC1, &(outputData[nunOfPixels])),
cv::Mat(h, w, CV_32FC1, &(outputData[nunOfPixels * 2])) };
for (auto & img : imgPlanes) // scale each plane (x255) to 8 bit
img.convertTo(img, CV_8UC1, 255);
cv::Mat resultImg;
cv::merge(imgPlanes, resultImg);
return resultImg;
}
// Read an IR model: topology from the .xml file, weights from the .bin file.
CNNNetwork CGOMfcTemplate2Dlg::IENetWork(string strXML, string strBIN)
{
	CNNNetReader reader;
	reader.ReadNetwork(strXML);
	reader.ReadWeights(strBIN);
	return reader.getNetwork();
}
// Validate the model's I/O layout, force FP32 outputs and batch size 1, and
// return the name of the first output layer.
// Throws std::logic_error when the model is not single-input or an output
// descriptor is missing.
string CGOMfcTemplate2Dlg::IENetSetup(CNNNetwork network)
{
	InputsDataMap inputInfo(network.getInputsInfo()); // input descriptors
	if (inputInfo.size() != 1) throw std::logic_error("错误,该模型应该为单输入");
	// fix: removed the unused BlobMap, the unused h/w reads, and the
	// inputInfo["data"] lookup — map operator[] would silently insert a null
	// entry if the input were not named "data".
	network.setBatchSize(1); // a single image per inference
	// Prepare the output descriptors.
	OutputsDataMap outputInfo(network.getOutputsInfo());
	std::string firstOutputName;
	for (auto &item : outputInfo) {
		if (firstOutputName.empty()) {
			firstOutputName = item.first; // remember the first output layer's name
		}
		DataPtr outputData = item.second;
		if (!outputData) {
			throw std::logic_error("错误的格式,请检查!");
		}
		item.second->setPrecision(Precision::FP32);
	}
	return firstOutputName;
}
// Build a CPU inference plugin and register the CPU extensions layer pack.
// NOTE(review): the network parameter is not used here.
InferencePlugin CGOMfcTemplate2Dlg::IEplugin(CNNNetwork network)
{
	PluginDispatcher dispatcher;
	InferencePlugin cpuPlugin(dispatcher.getSuitablePlugin(TargetDevice::eCPU));
	cpuPlugin.AddExtension(std::make_shared<Extensions::Cpu::CpuExtensions>());
	return cpuPlugin;
}
// Compile the network for the plugin's device with the default configuration.
ExecutableNetwork CGOMfcTemplate2Dlg::getNetWork(InferencePlugin plugin, CNNNetwork network)
{
	return plugin.LoadNetwork(network, {});
}
// (removed: Gitee content-moderation banner text accidentally captured along
// with the source; it was not part of the program and breaks compilation)