Copy the contents of a 3D cudaArray obtained from an OpenGL texture - cuda

I would like to copy the contents of a 3D cudaArray originating from an OpenGL texture to a "classical" array and vice-versa.
Note:
In the following snippets, error checks are omitted for clarity.
The cudaArray is "allocated" this way:
cudaArray* texture_array {nullptr};
cudaGraphicsResource* resource{nullptr};
cudaGraphicsGLRegisterImage(&resource, texture.id, GL_TEXTURE_3D, cudaGraphicsRegisterFlagsNone);
cudaGraphicsMapResources(1, &resource, cuda_stream);
cudaGraphicsSubResourceGetMappedArray(&texture_array, resource, array_index, mipmap);
This operation succeeds, as I am able to obtain relevant information using
cudaArrayGetInfo(&description, &extent, &flags, texture_array); for example, with a 512 x 512 x 122 texture in uint16 format I get:
//C-style pseudo-code
description
{
.x = 16,
.y = 0,
.z = 0,
.w = 0,
.f = cudaChannelFormatKindUnsigned,
};
extent
{
.width = 512,
.height = 512,
.depth = 122
};
flags = 0;
First try: linear array
After reading this answer to a post asking about pitched memory, my first try was to use cudaMemcpy3D and simulate a pitched array, with the pitch set to the row length in bytes, like this:
std::uint8_t* linear_array{nullptr};
const cudaExtent extent =
{
.width = texture.width * texture.pixel_format_byte_size,
.height = texture.height,
.depth = texture.depth
};
cudaMalloc(&linear_array, extent.width * extent.height * extent.depth);
And then copy to it like that:
const cudaMemcpy3DParms copy_info =
{
.srcArray = texture_array,
.srcPos =
{
.x = 0,
.y = 0,
.z = 0
},
.srcPtr =
{
.ptr = nullptr,
.pitch = 0,
.xsize = 0,
.ysize = 0
},
.dstArray = nullptr,
.dstPos =
{
.x = 0,
.y = 0,
.z = 0
},
.dstPtr =
{
.ptr = linear_array,
.pitch = extent.width,
.xsize = texture.width,
.ysize = texture.height,
},
.extent = extent,
.kind = cudaMemcpyDefault,
};
cudaMemcpy3D(&copy_info)
The code above, however, produces cudaErrorInvalidValue on the call to cudaMemcpy3D.
Needless to say, the same thing happens if I reverse the two (source becomes destination and vice-versa).
Second try: pitched array
A bit more complicated for me as I intend to modify the data in a __global__ function, but whatever.
Similarly, I allocate a (real) pitched array like this:
cudaPitchedPtr ptr;
const cudaExtent extent =
{
.width = texture.width * texture.pixel_format_byte_size,
.height = texture.height,
.depth = texture.depth,
};
cudaMalloc3D(&ptr, extent);
And copy to it like this:
const cudaMemcpy3DParms copy_info =
{
.srcArray = texture_array,
.srcPos =
{
.x = 0,
.y = 0,
.z = 0
},
.srcPtr =
{
.ptr = nullptr,
.pitch = 0,
.xsize = 0,
.ysize = 0
},
.dstArray = nullptr,
.dstPos =
{
.x = 0,
.y = 0,
.z = 0
},
.dstPtr = ptr,
.extent = extent,
.kind = cudaMemcpyDefault
};
cudaMemcpy3D(&copy_info);
But I also got cudaErrorInvalidValue on the call to cudaMemcpy3D.
What am I doing wrong?
Is there a limitation of the API forbidding me from calling cudaMemcpy3D when the array comes from a graphics API texture? If so, what can I do?

After various tests (copying to another cudaArray and other similar things), it appears the problem came from a misunderstanding.
The documentation clearly states:
" If a CUDA array is participating in the copy, the extent is defined in terms of that array's elements".
Thus, copy_info.extent has to be (in my context) the extent retrieved by cudaArrayGetInfo, i.e. expressed in array elements rather than in bytes.
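For reference, here is a minimal sketch of the corrected second attempt (same assumptions as above: error checks omitted, texture and texture_array as defined earlier). The allocation keeps its width in bytes, while the copy extent is given in array elements:
// Allocation unchanged: cudaMalloc3D expects the extent width in bytes.
cudaPitchedPtr ptr{};
const cudaExtent alloc_extent =
{
    .width = texture.width * texture.pixel_format_byte_size,
    .height = texture.height,
    .depth = texture.depth
};
cudaMalloc3D(&ptr, alloc_extent);
// Copy: a cudaArray participates, so the extent is in that array's elements.
const cudaExtent copy_extent =
{
    .width = texture.width, // elements, not bytes
    .height = texture.height,
    .depth = texture.depth
};
const cudaMemcpy3DParms copy_info =
{
    .srcArray = texture_array,
    .dstPtr = ptr,
    .extent = copy_extent,
    .kind = cudaMemcpyDefault
};
cudaMemcpy3D(&copy_info);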

Related

How do you ensure background is cleared correctly in WebGPU?

Is there a magic flag I need to set to get correct clearing of the background when rendering with WebGPU? My render works fine, except that, whatever settings I use, I see the garbage that was last displayed in the browser window instead of the background color (if I set a non-zero clearColor in the attachment, it keeps accumulating the same color until it maxes out):
I am using WebGPU via the emscripten CPP interface, and running Chrome Canary on Windows (Version 90.0.4407.0 (Official Build) canary (64-bit)).
My frame render looks like this (called from requestAnimationFrame on the js side):
WGPUSwapChain swapChain = _pWindow->swapChain();
WGPUTextureView backbufferView = wgpuSwapChainGetCurrentTextureView(swapChain);
WGPURenderPassDescriptor renderpassInfo = {};
WGPURenderPassColorAttachmentDescriptor colorAttachment = {};
{
colorAttachment.attachment = backbufferView;
colorAttachment.resolveTarget = nullptr;
colorAttachment.clearColor = { 0.0f, 0.0f, 0.0f, 0.0f };
colorAttachment.loadOp = WGPULoadOp_Clear;
colorAttachment.storeOp = WGPUStoreOp_Store;
renderpassInfo.colorAttachmentCount = 1;
renderpassInfo.colorAttachments = &colorAttachment;
renderpassInfo.depthStencilAttachment = nullptr;
}
WGPUCommandBuffer commands;
{
WGPUCommandEncoder encoder = wgpuDeviceCreateCommandEncoder(_device, nullptr);
WGPURenderPassEncoder pass = wgpuCommandEncoderBeginRenderPass(encoder, &renderpassInfo);
wgpuRenderPassEncoderSetPipeline(pass, _pipeline);
wgpuRenderPassEncoderSetVertexBuffer(pass, 0, _vb, 0, 0);
wgpuRenderPassEncoderDraw(pass, 3, 1, 0, 0);
wgpuRenderPassEncoderEndPass(pass);
wgpuRenderPassEncoderRelease(pass);
commands = wgpuCommandEncoderFinish(encoder, nullptr);
wgpuCommandEncoderRelease(encoder);
}
wgpuQueueSubmit(_queue, 1, &commands);
wgpuCommandBufferRelease(commands);
wgpuTextureViewRelease(backbufferView);
The pipeline is setup with these settings:
WGPURenderPipelineDescriptor descriptor = {};
WGPUBlendDescriptor blendDescriptor = {};
blendDescriptor.operation = WGPUBlendOperation_Add;
blendDescriptor.srcFactor = WGPUBlendFactor_One;
blendDescriptor.dstFactor = WGPUBlendFactor_Zero;
WGPUColorStateDescriptor colorStateDescriptor = {};
colorStateDescriptor.format = _colorFormat;
colorStateDescriptor.alphaBlend = blendDescriptor;
colorStateDescriptor.colorBlend = blendDescriptor;
colorStateDescriptor.writeMask = WGPUColorWriteMask_All;
descriptor.colorStateCount = 1;
descriptor.colorStates = &colorStateDescriptor;
Is there a setting I've missed or is this a Canary bug?
The clearColor had a zero alpha value, and that was messing it up :( Changing it to one works fine:
colorAttachment.clearColor = { 0.0f, 0.0f, 0.0f, 1.0f };

How to create 2D shapes with n-sides in WebGL using keyboard input?

I'm trying to create a program in WebGL that lets you draw shapes with n sides via keyboard input. The user enters the number of sides to generate a shape with that many sides: if you press '3' you get a triangle, if you press '4' you get a square, if you press '5' you get a pentagon, etc.
So far, I've been able to create separate pieces of code that create triangles, squares, pentagons, etc. without keyboard input, but I'm not sure how to generate shapes with n sides in the same program via user/keyboard input. How would I go about doing this?
Examples of my code so far:
Drawing a triangle:
var VSHADER_SOURCE =
'attribute vec4 a_Position;\n' +
'void main() {\n' +
' gl_Position = a_Position;\n' +
'}\n';
var FSHADER_SOURCE =
'void main() {\n' +
' gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0);\n' +
'}\n';
function main() {
var canvas = document.getElementById('webgl');
var gl = getWebGLContext(canvas);
if (!gl) {
console.log('Failed to get the rendering context for WebGL');
return;
}
if (!initShaders(gl, VSHADER_SOURCE, FSHADER_SOURCE)) {
console.log('Failed to initialize shaders.');
return;
}
var n = initVertexBuffers(gl);
if (n < 0) {
console.log('Failed to set the positions of the vertices');
return;
}
gl.clearColor(0, 0, 0, 0);
gl.clear(gl.COLOR_BUFFER_BIT);
gl.drawArrays(gl.TRIANGLES, 0, n);
}
function initVertexBuffers(gl) {
var vertices = new Float32Array([
0, 0.5, -0.5, -0.5, 0.5, -0.5
]);
var n = 3; // The number of vertices
var vertexBuffer = gl.createBuffer();
if (!vertexBuffer) {
console.log('Failed to create the buffer object');
return -1;
}
gl.bindBuffer(gl.ARRAY_BUFFER, vertexBuffer);
gl.bufferData(gl.ARRAY_BUFFER, vertices, gl.STATIC_DRAW);
var a_Position = gl.getAttribLocation(gl.program, 'a_Position');
if (a_Position < 0) {
console.log('Failed to get the storage location of a_Position');
return -1;
}
gl.vertexAttribPointer(a_Position, 2, gl.FLOAT, false, 0, 0);
gl.enableVertexAttribArray(a_Position);
return n;
}
Drawing a square:
var VSHADER_SOURCE =
'attribute vec4 a_Position;\n' +
'void main() {\n' +
' gl_Position = a_Position;\n' +
'}\n';
var FSHADER_SOURCE =
'void main() {\n' +
' gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0);\n' +
'}\n';
function main() {
var canvas = document.getElementById('webgl');
var gl = getWebGLContext(canvas);
if (!gl) {
console.log('Failed to get the rendering context for WebGL');
return;
}
if (!initShaders(gl, VSHADER_SOURCE, FSHADER_SOURCE)) {
console.log('Failed to initialize shaders.');
return;
}
var n = initVertexBuffers(gl);
if (n < 0) {
console.log('Failed to set the positions of the vertices');
return;
}
gl.clearColor(0, 0, 0, 0);
gl.clear(gl.COLOR_BUFFER_BIT);
gl.drawArrays(gl.TRIANGLE_STRIP, 0, n);
}
function initVertexBuffers(gl) {
var vertices = new Float32Array([
-1, -1, -1, 1, 1, 1, 1, -1, -1, -1,
]);
var n = 5; // The number of vertices
var vertexBuffer = gl.createBuffer();
if (!vertexBuffer) {
console.log('Failed to create the buffer object');
return -1;
}
gl.bindBuffer(gl.ARRAY_BUFFER, vertexBuffer);
gl.bufferData(gl.ARRAY_BUFFER, vertices, gl.STATIC_DRAW);
var a_Position = gl.getAttribLocation(gl.program, 'a_Position');
if (a_Position < 0) {
console.log('Failed to get the storage location of a_Position');
return -1;
}
gl.vertexAttribPointer(a_Position, 2, gl.FLOAT, false, 0, 0);
gl.enableVertexAttribArray(a_Position);
return n;
}
You can start by writing a function that computes the vertex positions of a polygon, taking the number of sides as a parameter.
For example, this one computes the vertices of a polygon inscribed in a circle of a given radius, using polar coordinates. You can write your own.
computePolygonPositions(sides, radius)
{
let positions = []
for (let i=0; i<sides; i++)
{
let i0 = i
let i1 = (i+1) % sides
let theta0 = 2.0 * Math.PI * i0 / sides
let theta1 = 2.0 * Math.PI * i1 / sides
let x0 = radius * Math.cos(theta0)
let y0 = radius * Math.sin(theta0)
let x1 = radius * Math.cos(theta1)
let y1 = radius * Math.sin(theta1)
positions.push(0, 0)
positions.push(x0, y0)
positions.push(x1, y1)
}
return positions
}
Of course, you can upgrade this function to add indices, tex coordinates, colors or anything you need.
Once you're done with it, just call it to create a new vertex buffer that you bind to ARRAY_BUFFER, then set the layout and enable the position attribute.
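For the keyboard part, a minimal sketch could look like the following (rebuildAndDraw is a hypothetical helper, assumed to upload the returned positions to the vertex buffer and issue the draw call with the matching vertex count):
// Hypothetical glue: rebuildAndDraw(gl, positions) re-uploads the data
// and calls gl.drawArrays with positions.length / 2 vertices.
window.addEventListener('keydown', (event) => {
  const sides = parseInt(event.key, 10) // '3' -> 3, '4' -> 4, ...
  if (!Number.isNaN(sides) && sides >= 3) {
    const positions = computePolygonPositions(sides, 0.5)
    rebuildAndDraw(gl, positions)
  }
})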

Displaying rgb8 pixel data in-browser

I have found similar questions to mine on SO, but have not yet come across an answer to this problem. I have a rgb8 encoded image that I am trying to display in-browser, either in an img or canvas element. I am unsure how to convert this pixel data into an image properly, and was looking for any insight.
For context, the source of this rgb8 data is from a ROS topic with type sensor_msgs/Image. When subscribing to this topic using roslibjs, I am given the following object:
{
data: "MT4+CR…", (of length 1228800)
encoding: "rgb8",
header: {
frame_id: "camera_color_optical_frame",
seq: 1455,
stamp: ...timestamp info
},
height: 480,
is_bigendian: 0,
step: 1920,
width: 640
}
With the data string, I have tried displaying it on canvas, converting it to base64, etc. but have not been able to. I know about web_video_server in ROS to help send these images over a port, but that is not an option for me unfortunately - I need to work directly with the data.
Is there a way I can go about displaying this rgb8 data in the browser? Based on the documentation on here, data should be represented as a uint8[] (if that helps).
Thank you so much!
First create a canvas of the correct size and obtain a CanvasRenderingContext2D
// Assuming that imgMes is the image message as linked in question
const can = document.createElement("canvas");
can.width = imgMes.width;
can.height = imgMes.height;
const ctx = can.getContext("2d");
Then create an image buffer to hold the pixels
const imgData = ctx.createImageData(imgMes.width, imgMes.height);
const data = imgData.data;
const inData = imgMes.data;
Then read the data from the image message, making sure to use the byte order indicated by the is_bigendian flag.
var i = 0, j, y = 0, x;
while (y < imgMes.height) {
j = y * imgMes.step;
for (x = 0; x < imgMes.width; x ++) {
if (imgMes.is_bigendian) {
data[i] = inData[j]; // red
data[i + 1] = inData[j + 1]; // green
data[i + 2] = inData[j + 2]; // blue
} else {
data[i + 2] = inData[j]; // blue
data[i + 1] = inData[j + 1]; // green
data[i] = inData[j + 2]; // red
}
data[i + 3] = 255; // alpha
i += 4;
j += 3;
}
y++;
}
Then put the pixel data into the canvas:
ctx.putImageData(imgData, 0, 0);
And add the canvas to your HTML
document.body.appendChild(can);
And you are done.
Note that I may have is_bigendian the wrong way around. If so, just change the line if (imgMes.is_bigendian) { to if (!imgMes.is_bigendian) {
UPDATE
With more information regarding the data format, I was able to extract the image.
I used atob to decode the Base64 string. This returns another string. I then iterated over each character in the string, getting the character code to add to each pixel.
It is unclear where the endianness is. My guess is that it is in the decoded string, and thus the code swaps bytes for each char code, as it makes no sense to have endianness on multiples of 3 bytes.
const can = document.createElement("canvas");
can.width = imgMes.width;
can.height = imgMes.height;
const ctx = can.getContext("2d");
const imgData = ctx.createImageData(imgMes.width, imgMes.height);
const data = imgData.data;
const inData = atob(imgMes.data);
var j = 0, i = 0; // j = index into the input data, i = index into the output pixels
while (j < inData.length) {
const w1 = inData.charCodeAt(j++); // read 3 16-bit words representing 1 pixel
const w2 = inData.charCodeAt(j++);
const w3 = inData.charCodeAt(j++);
if (!imgMes.is_bigendian) {
data[i++] = w1; // red
data[i++] = w2; // green
data[i++] = w3; // blue
} else {
data[i++] = (w1 >> 8) + ((w1 & 0xFF) << 8);
data[i++] = (w2 >> 8) + ((w2 & 0xFF) << 8);
data[i++] = (w3 >> 8) + ((w3 & 0xFF) << 8);
}
data[i++] = 255; // alpha
}
ctx.putImageData(imgData, 0, 0);
document.body.appendChild(can);
From the example data I got an image of some paving near a road.
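If you would rather show the result in an img element (the question mentions both), one simple option, sketched here, is to export the filled canvas as a data URL (the "output" id is just an assumed placeholder):
// Assuming an <img id="output"> element exists in the page
// and `can` is the canvas filled above.
const img = document.getElementById("output");
img.src = can.toDataURL("image/png");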

Emscripten pass uint8_t array to javascript?

Trying to display a uint8_t* rgb image data buffer, processed in C via WASM, on an HTML canvas.
In C I have the following external method:
extern void JS_DisplayRenderData(uint8_t* data, int dataLength);
Then I call the external method like so:
int size = 1280 * 720 * 3;
uint8_t data[size];
memset(data, 255, size);
JS_DisplayRenderData(data, size);
In JavaScript I then try to display the buffer like so:
if (typeof mergeInto !== 'undefined') mergeInto(LibraryManager.library,
{
JS_DisplayRenderData: function(data, dataLength)
{
alert("Data Length: " + dataLength);
var c = document.getElementById("canvas");
var ctx = c.getContext("2d");
var imgdata = ctx.getImageData(0, 0, c.width, c.height);
var imgdatalen = imgdata.data.length;
var i2 = 0;
for (var i = 0; i < (imgdatalen / 4); i++)
{
imgdata.data[4*i] = data[i2]; // RED (0-255)
imgdata.data[4*i+1] = data[i2+1]; // GREEN (0-255)
imgdata.data[4*i+2] = data[i2+2]; // BLUE (0-255)
imgdata.data[4*i+3] = 255; // ALPHA (0-255)
i2 += 3;
}
ctx.putImageData(imgdata, 0, 0);
}
});
However, all I get are black pixels even though they should all be white.
Found the answer thanks to: Struct operations in Javascript through Emscripten
I just needed to state what the buffer type was, via: "var a = HEAPU8.subarray(data);"
The method I'm using to invoke JS from C can be found here: Webassembly calling JavaScript methods from wasm i.e. within c++ code
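For reference, a minimal sketch of the callback reading the pixels through a HEAPU8 view (this assumes the default Emscripten heap views are visible to library code, and bounds the view with dataLength):
JS_DisplayRenderData: function(data, dataLength)
{
    // `data` is a byte offset into the WASM heap, not a JS array,
    // so take a typed-array view over that region first.
    var pixels = HEAPU8.subarray(data, data + dataLength);
    var c = document.getElementById("canvas");
    var ctx = c.getContext("2d");
    var imgdata = ctx.getImageData(0, 0, c.width, c.height);
    var i2 = 0;
    for (var i = 0; i < imgdata.data.length / 4; i++)
    {
        imgdata.data[4*i] = pixels[i2]; // RED
        imgdata.data[4*i+1] = pixels[i2+1]; // GREEN
        imgdata.data[4*i+2] = pixels[i2+2]; // BLUE
        imgdata.data[4*i+3] = 255; // ALPHA
        i2 += 3;
    }
    ctx.putImageData(imgdata, 0, 0);
}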

Trying to understand code in the PolygonSprite.java library file

I am trying to execute this piece of code in my game.
public void draw(Batch batch, float parentAlpha){
PolygonRegion polyReg = new PolygonRegion(connectorTextureRegion, getVertices(), getTriangles());
polygonSprite.setRegion(polyReg);
polygonSprite.draw((PolygonSpriteBatch)batch);
}
In general, it works well, but sometimes I get an ArrayIndexOutOfBounds exception caused by the setRegion line. If I dive into the method's code, it fails at the first line inside the for loop.
public void setRegion (PolygonRegion region) {
this.region = region;
float[] regionVertices = region.vertices;
float[] textureCoords = region.textureCoords;
if (vertices == null || regionVertices.length != vertices.length) vertices = new float[(regionVertices.length / 2) * 5];
// Set the color and UVs in this sprite's vertices.
float floatColor = color.toFloatBits();
float[] vertices = this.vertices;
for (int i = 0, v = 2, n = regionVertices.length; i < n; i += 2, v += 5) {
vertices[v] = floatColor;
vertices[v + 1] = textureCoords[i];
vertices[v + 2] = textureCoords[i + 1];
}
dirty = true;
}
Now, let me know if I'm wrong, but I feel like there might be an issue with the if condition. Why are we checking this?
regionVertices.length != vertices.length
Whenever I get the exception, it's because the length of the region vertices array and the length of the sprite vertices array are equal.