CARVIEW |
Select Language
HTTP/2 200
date: Wed, 23 Jul 2025 05:52:44 GMT
content-type: text/html; charset=utf-8
vary: X-PJAX, X-PJAX-Container, Turbo-Visit, Turbo-Frame, X-Requested-With,Accept-Encoding, Accept, X-Requested-With
etag: W/"38ed9590615db7d35f5603794d2b5e11"
cache-control: max-age=0, private, must-revalidate
strict-transport-security: max-age=31536000; includeSubdomains; preload
x-frame-options: deny
x-content-type-options: nosniff
x-xss-protection: 0
referrer-policy: no-referrer-when-downgrade
content-security-policy: default-src 'none'; base-uri 'self'; child-src github.githubassets.com github.com/assets-cdn/worker/ github.com/assets/ gist.github.com/assets-cdn/worker/; connect-src 'self' uploads.github.com www.githubstatus.com collector.github.com raw.githubusercontent.com api.github.com github-cloud.s3.amazonaws.com github-production-repository-file-5c1aeb.s3.amazonaws.com github-production-upload-manifest-file-7fdce7.s3.amazonaws.com github-production-user-asset-6210df.s3.amazonaws.com *.rel.tunnels.api.visualstudio.com wss://*.rel.tunnels.api.visualstudio.com objects-origin.githubusercontent.com copilot-proxy.githubusercontent.com proxy.individual.githubcopilot.com proxy.business.githubcopilot.com proxy.enterprise.githubcopilot.com *.actions.githubusercontent.com wss://*.actions.githubusercontent.com productionresultssa0.blob.core.windows.net/ productionresultssa1.blob.core.windows.net/ productionresultssa2.blob.core.windows.net/ productionresultssa3.blob.core.windows.net/ productionresultssa4.blob.core.windows.net/ productionresultssa5.blob.core.windows.net/ productionresultssa6.blob.core.windows.net/ productionresultssa7.blob.core.windows.net/ productionresultssa8.blob.core.windows.net/ productionresultssa9.blob.core.windows.net/ productionresultssa10.blob.core.windows.net/ productionresultssa11.blob.core.windows.net/ productionresultssa12.blob.core.windows.net/ productionresultssa13.blob.core.windows.net/ productionresultssa14.blob.core.windows.net/ productionresultssa15.blob.core.windows.net/ productionresultssa16.blob.core.windows.net/ productionresultssa17.blob.core.windows.net/ productionresultssa18.blob.core.windows.net/ productionresultssa19.blob.core.windows.net/ github-production-repository-image-32fea6.s3.amazonaws.com github-production-release-asset-2e65be.s3.amazonaws.com insights.github.com wss://alive.github.com api.githubcopilot.com api.individual.githubcopilot.com api.business.githubcopilot.com api.enterprise.githubcopilot.com; 
font-src github.githubassets.com; form-action 'self' github.com gist.github.com copilot-workspace.githubnext.com objects-origin.githubusercontent.com; frame-ancestors 'none'; frame-src viewscreen.githubusercontent.com notebooks.githubusercontent.com; img-src 'self' data: blob: github.githubassets.com media.githubusercontent.com camo.githubusercontent.com identicons.github.com avatars.githubusercontent.com private-avatars.githubusercontent.com github-cloud.s3.amazonaws.com objects.githubusercontent.com release-assets.githubusercontent.com secured-user-images.githubusercontent.com/ user-images.githubusercontent.com/ private-user-images.githubusercontent.com opengraph.githubassets.com copilotprodattachments.blob.core.windows.net/github-production-copilot-attachments/ github-production-user-asset-6210df.s3.amazonaws.com customer-stories-feed.github.com spotlights-feed.github.com objects-origin.githubusercontent.com *.githubusercontent.com; manifest-src 'self'; media-src github.com user-images.githubusercontent.com/ secured-user-images.githubusercontent.com/ private-user-images.githubusercontent.com github-production-user-asset-6210df.s3.amazonaws.com gist.github.com; script-src github.githubassets.com; style-src 'unsafe-inline' github.githubassets.com; upgrade-insecure-requests; worker-src github.githubassets.com github.com/assets-cdn/worker/ github.com/assets/ gist.github.com/assets-cdn/worker/
server: github.com
content-encoding: gzip
accept-ranges: bytes
set-cookie: _gh_sess=c2blhP0VqOi%2BlOAzziVWBFGXDek%2BpQeltsVal8VBeV%2FOSArwPugCr99%2BApBFPyCjWOE4owi8ozB6JCiCaS2HWtnuKc33iee5%2Bn0GgVbDWbOFxTO0JHFv5y99pTbQUaehs9pIOUszSeIBYnSWY%2FP4AzqfyWLOJDRKJGJMe2LCle4v4ZVQdY2uZGVw5Xo6ynPuqx4KqAOTNuibK4UFw2mXiB3WpmJqC7T8FD6QJX4kYRMHtJj2Mi%2FGp6xYgL4UzDOWN2bFX0Y3e5pCTY%2B0bUlRjQ%3D%3D--Xd%2FlBbpqkwsrBMEa--1JhaT8h%2B1Bfo9YiRFdltCA%3D%3D; Path=/; HttpOnly; Secure; SameSite=Lax
set-cookie: _octo=GH1.1.2132385523.1753249963; Path=/; Domain=github.com; Expires=Thu, 23 Jul 2026 05:52:43 GMT; Secure; SameSite=Lax
set-cookie: logged_in=no; Path=/; Domain=github.com; Expires=Thu, 23 Jul 2026 05:52:43 GMT; HttpOnly; Secure; SameSite=Lax
x-github-request-id: 83F2:941DF:47B85D:5E6BA3:688078AB
low level operation api · Tencent/ncnn Wiki · GitHub
Skip to content
Navigation Menu
{{ message }}
-
Notifications
You must be signed in to change notification settings - Fork 4.3k
low level operation api
wiki-sync-bot edited this page Jul 12, 2025
·
1 revision
- input must be fp32 storage without packing
- output is expected to be fp32 storage without packing
void binary_add(const ncnn::Mat& a, const ncnn::Mat& b, ncnn::Mat& c)
{
    // Element-wise c = a + b via a standalone BinaryOp layer.
    // Inputs must be fp32 without packing; output is fp32 without packing.
    ncnn::Option opt;
    opt.num_threads = 2;
    opt.use_fp16_storage = false;
    opt.use_packing_layout = false;

    ncnn::Layer* binop = ncnn::create_layer("BinaryOp");

    // op_type 0 selects addition
    ncnn::ParamDict pd;
    pd.set(0, 0);
    binop->load_param(pd);

    binop->create_pipeline(opt);

    // two-input / one-output forward pass
    std::vector<ncnn::Mat> inputs(2);
    inputs[0] = a;
    inputs[1] = b;
    std::vector<ncnn::Mat> outputs(1);
    binop->forward(inputs, outputs, opt);
    c = outputs[0];

    binop->destroy_pipeline(opt);
    delete binop;
}
- input must be fp32 storage without packing
- output is expected to be fp32 storage without packing
void convolution_3x3_boxblur_RGB(const ncnn::Mat& rgb, ncnn::Mat& out)
{
    // 3x3 box blur over a 3-channel image, expressed as a depth-wise
    // convolution (group == channels) with every tap weighted 1/9, no bias.
    // Input must be fp32 without packing; output is fp32 without packing.
    ncnn::Option opt;
    opt.num_threads = 2;
    opt.use_fp16_storage = false;
    opt.use_packing_layout = false;

    ncnn::Layer* blur = ncnn::create_layer("ConvolutionDepthWise");

    ncnn::ParamDict pd;
    pd.set(0, 3);      // num_output
    pd.set(1, 3);      // kernel_w (kernel_h follows kernel_w)
    pd.set(5, 0);      // bias_term
    pd.set(6, 3*3*3);  // weight_data_size
    pd.set(7, 3);      // group — one filter per channel
    blur->load_param(pd);

    // uniform 1/9 taps make each output pixel the mean of its 3x3 window
    ncnn::Mat weights[1];
    weights[0].create(3*3*3);
    weights[0].fill(1.f / 9);
    blur->load_model(ncnn::ModelBinFromMatArray(weights));

    blur->create_pipeline(opt);

    blur->forward(rgb, out, opt);

    blur->destroy_pipeline(opt);
    delete blur;
}
- input must be fp32 storage with/without packing
- output is expected to be fp32 storage packed
void transpose(const ncnn::Mat& in, ncnn::Mat& out)
{
    // Transpose via a standalone Permute layer.
    // Input may be fp32 with or without packing; output is fp32 packed.
    ncnn::Option opt;
    opt.num_threads = 2;
    opt.use_fp16_storage = false;
    opt.use_packing_layout = true;

    ncnn::Layer* permute = ncnn::create_layer("Permute");

    ncnn::ParamDict pd;
    pd.set(0, 1);// order_type
    permute->load_param(pd);

    permute->create_pipeline(opt);

    // repack the input to the elempack this layer prefers
    ncnn::Mat src = in;
    {
        // total element count along the packed axis
        int total = 0;
        if (in.dims == 1) total = in.elempack * in.w;
        else if (in.dims == 2) total = in.elempack * in.h;
        else if (in.dims == 3) total = in.elempack * in.c;

        int want_elempack = 1;
        if (permute->support_packing)
        {
            const bool wide_simd = ncnn::cpu_support_x86_avx2() || ncnn::cpu_support_x86_avx();
            if (wide_simd && total % 8 == 0)
                want_elempack = 8;
            else if (total % 4 == 0)
                want_elempack = 4;
        }

        if (in.elempack != want_elempack)
            convert_packing(in, src, want_elempack, opt);
    }

    permute->forward(src, out, opt);

    permute->destroy_pipeline(opt);
    delete permute;
}
// x = (x - mean) / sqrt(var)
- input can be fp32/fp16 storage with/without packing
- output is expected to be fp16 storage packed when supported, or fp32 storage packed otherwise
void normalize(const ncnn::Mat& in, ncnn::Mat& out)
{
    // x = (x - mean) / sqrt(var) — InstanceNorm with gamma=1, beta=0, eps=0,
    // i.e. pure per-channel standardization.
    // Input may be fp32 or fp16, packed or not; output is fp16 packed when
    // the layer supports fp16 storage, otherwise fp32 packed.
    ncnn::Option opt;
    opt.num_threads = 2;
    opt.use_fp16_storage = true;
    opt.use_packing_layout = true;

    ncnn::Layer* norm = ncnn::create_layer("InstanceNorm");

    ncnn::ParamDict pd;
    pd.set(0, in.c);// channels
    pd.set(1, 0.f); // eps
    norm->load_param(pd);

    // identity affine parameters: gamma = 1, beta = 0
    ncnn::Mat weights[2];
    weights[0].create(in.c);
    weights[1].create(in.c);
    weights[0].fill(1.f);
    weights[1].fill(0.f);
    norm->load_model(ncnn::ModelBinFromMatArray(weights));

    norm->create_pipeline(opt);

    // convert storage type to whatever the layer prefers
    ncnn::Mat blob = in;
    if (in.elembits() == 32 && norm->support_fp16_storage)
        cast_float32_to_float16(in, blob, opt);
    else if (in.elembits() == 16 && !norm->support_fp16_storage)
        cast_float16_to_float32(in, blob, opt);

    // repack the (possibly converted) blob to the preferred elempack
    ncnn::Mat src = blob;
    {
        int total = 0;
        if (blob.dims == 1) total = blob.elempack * blob.w;
        else if (blob.dims == 2) total = blob.elempack * blob.h;
        else if (blob.dims == 3) total = blob.elempack * blob.c;

        int want_elempack = 1;
        if (norm->support_packing)
        {
            const bool wide_simd = ncnn::cpu_support_x86_avx2() || ncnn::cpu_support_x86_avx();
            if (wide_simd && total % 8 == 0)
                want_elempack = 8;
            else if (total % 4 == 0)
                want_elempack = 4;
        }

        if (blob.elempack != want_elempack)
            convert_packing(blob, src, want_elempack, opt);
    }

    norm->forward(src, out, opt);

    norm->destroy_pipeline(opt);
    delete norm;
}
// Standalone Convolution layer executed on the GPU via Vulkan.
// Assumes outch, inch, ksize, w, h and random_mat() are defined by the
// surrounding example context.
ncnn::VulkanDevice* vkdev = ncnn::get_gpu_device();

ncnn::VkAllocator* blob_vkallocator = vkdev->acquire_blob_allocator();
ncnn::VkAllocator* staging_vkallocator = vkdev->acquire_staging_allocator();

// dedicated allocators for long-lived weight buffers (freed explicitly below)
ncnn::VkWeightAllocator* weight_vkallocator = new ncnn::VkWeightAllocator(vkdev);
ncnn::VkWeightStagingAllocator* weight_staging_vkallocator = new ncnn::VkWeightStagingAllocator(vkdev);

// create layer
ncnn::Layer* convolution = ncnn::create_layer("Convolution");
convolution->vkdev = vkdev;

// set option — Vulkan is enabled here, on the Option, not on ParamDict
ncnn::Option opt;
opt.num_threads = 4;
opt.use_vulkan_compute = true;
opt.blob_vkallocator = blob_vkallocator;
opt.workspace_vkallocator = blob_vkallocator;
opt.staging_vkallocator = staging_vkallocator;
// load param
{
ncnn::ParamDict pd;
pd.set(0, outch);// num_output
pd.set(1, ksize);// kernel_w (kernel_h defaults to kernel_w)
pd.set(5, 1);// bias_term — weights[1] below supplies the bias data
pd.set(6, outch*inch*ksize*ksize);// weight_data_size
// NOTE: the original snippet did `pd.use_vulkan_compute = 1;` — ParamDict
// has no such member; opt.use_vulkan_compute above is what enables Vulkan.
convolution->load_param(pd);
}
// load model
{
ncnn::Mat weights[2];
weights[0] = random_mat(outch*inch*ksize*ksize);// weight_data
weights[1] = random_mat(outch);// bias_data (read because bias_term == 1)
ncnn::ModelBinFromMatArray mb(weights);
convolution->load_model(mb);
}
// create pipeline
convolution->create_pipeline(opt);
// upload model weights to device memory using the weight allocators
{
ncnn::VkTransfer cmd(vkdev);
ncnn::Option opt_upload = opt;
opt_upload.blob_vkallocator = weight_vkallocator;
opt_upload.workspace_vkallocator = weight_vkallocator;
opt_upload.staging_vkallocator = weight_staging_vkallocator;
convolution->upload_model(cmd, opt_upload);
cmd.submit_and_wait();
}
ncnn::Mat bottom = random_mat(w, h, inch);
ncnn::Mat top;
// forward: upload input, run on GPU, download result
{
ncnn::VkCompute cmd(vkdev);
ncnn::VkMat bottom_gpu;
cmd.record_upload(bottom, bottom_gpu, opt);
ncnn::VkMat top_gpu;
convolution->forward(bottom_gpu, top_gpu, cmd, opt);
cmd.record_download(top_gpu, top, opt);
cmd.submit_and_wait();
}
// teardown: destroy pipeline before reclaiming/freeing allocators
convolution->destroy_pipeline(opt);
delete convolution;
vkdev->reclaim_blob_allocator(blob_vkallocator);
vkdev->reclaim_staging_allocator(staging_vkallocator);
weight_vkallocator->clear();
weight_staging_vkallocator->clear();
delete weight_vkallocator;
delete weight_staging_vkallocator;
Clone this wiki locally
You can’t perform that action at this time.