How do you create a torus in libGDX? ModelBuilder doesn't support it.
I have to create the torus in code and can't load any objects.
With libGDX, to create custom Models you'd use the MeshBuilder.
Via the MeshBuilder.vertex(...) method you can add the vertices, one by one, with the necessary information. Basically you'll need two nested loops, and you can look up the necessary formulas for the torus here.
You have to wrap it in MeshBuilder.begin(...) and MeshBuilder.end().
MeshBuilder.end() will return a Mesh, which you can then pass to ModelBuilder.fromMesh(mesh) to get the Model you need.
I found a solution for how to create a torus in libGDX. It may be helpful.
private void createTorus (int glMaterial, float X, float Y, float Z, float widthR,
        float height, int divisionsU, int divisionsV, float r, float g, float b, float a) {
    ModelBuilder modelBuilder = new ModelBuilder();
    modelBuilder.begin();
    // glMaterial is the GL primitive type passed to part(), e.g. GL20.GL_TRIANGLES
    MeshPartBuilder builder = modelBuilder.part("torus", glMaterial,
            Usage.Position | Usage.Normal, new Material(ColorAttribute.createDiffuse(r, g, b, a)));
    builder.setColor(Color.LIGHT_GRAY);

    VertexInfo curr1 = vertTmp3.set(null, null, null, null);
    curr1.hasUV = curr1.hasPosition = curr1.hasNormal = true;
    VertexInfo curr2 = vertTmp4.set(null, null, null, null);
    curr2.hasUV = curr2.hasPosition = curr2.hasNormal = true;

    short i1, i2, i3 = 0, i4 = 0;
    int i, j, k;
    double s, t, twopi;
    twopi = 2 * Math.PI;

    for (i = 0; i < divisionsV; i++) {
        for (j = 0; j <= divisionsU; j++) {
            for (k = 1; k >= 0; k--) {
                s = (i + k) % divisionsV + 0.5;
                t = j % divisionsU;
                // widthR is the distance from the torus center to the tube center,
                // height is the tube radius
                curr1.position.set(
                        (float) ((widthR + height * Math.cos(s * twopi / divisionsV)) * Math.cos(t * twopi / divisionsU)),
                        (float) ((widthR + height * Math.cos(s * twopi / divisionsV)) * Math.sin(t * twopi / divisionsU)),
                        (float) (height * Math.sin(s * twopi / divisionsV)));
                curr1.normal.set(curr1.position).nor();
                k--;
                s = (i + k) % divisionsV + 0.5;
                curr2.position.set(
                        (float) ((widthR + height * Math.cos(s * twopi / divisionsV)) * Math.cos(t * twopi / divisionsU)),
                        (float) ((widthR + height * Math.cos(s * twopi / divisionsV)) * Math.sin(t * twopi / divisionsU)),
                        (float) (height * Math.sin(s * twopi / divisionsV)));
                curr2.normal.set(curr1.normal);
                //curr2.uv.set((float) s, 0);
                i1 = builder.vertex(curr1);
                i2 = builder.vertex(curr2);
                // build a quad from the previous vertex pair and the current one
                builder.rect(i4, i2, i1, i3);
                i4 = i2;
                i3 = i1;
            }
        }
    }
    torus_Model = modelBuilder.end();
    torus_Instances = new ModelInstance(torus_Model);
}
I have a question about image convolution in CUDA. When I test it with a small matrix (16*16) everything is OK. But with a larger matrix, the result changes every time I run it.
I think the problem is the two for loops in the kernel.
__global__ void image_convolution_kernel(float *input, float *out, float *kernelConv,
                                         int img_width, const int img_height,
                                         const int kernel_width, const int kernel_height)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;

    float sum = 0;
    for (int j = 0; j < kernel_height; j++)
    {
        for (int i = 0; i < kernel_width; i++)
        {
            int dX = x + i - kernel_width / 2;
            int dY = y + j - kernel_height / 2;

            if (dX < 0)
                dX = 0;
            if (dX >= img_width)
                dX = img_width - 1;
            if (dY < 0)
                dY = 0;
            if (dY >= img_height)
                dY = img_height - 1;

            const int idMat = j * kernel_width + i;
            const int idPixel = dY * img_width + dX;
            sum += (float)input[idPixel] * kernelConv[idMat];
        }
    }

    const int idOut = y * img_width + x;
    out[idOut] = abs(sum);
}
void image_convolution(float * input, float* output, int img_height, int img_width)
{
    int kernel_height = 3;
    int kernel_width = 3;
    float kernel[] = { 0, -0.25, 0,
                       -0.25, 1, -0.25,
                       0, -0.25, 0 };
    float * mask = new float[kernel_height*kernel_width];
    for (int i = 0; i < kernel_height*kernel_width; i++)
    {
        mask[i] = kernel[i];
    }

    float * d_input, * d_output, * d_kernel;
    cudaMalloc(&d_input, img_width*img_height*sizeof(float));
    cudaMalloc(&d_output, img_width*img_height*sizeof(float));
    cudaMalloc(&d_kernel, kernel_height*kernel_width*sizeof(float));

    cudaMemcpy(d_input, input, img_width*img_height*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_kernel, mask, kernel_height*kernel_width*sizeof(float), cudaMemcpyHostToDevice);

    dim3 blocksize(16,16);
    dim3 gridsize;
    gridsize.x = (img_width + blocksize.x - 1) / blocksize.x;
    gridsize.y = (img_height + blocksize.y - 1) / blocksize.y;

    image_convolution_kernel<<<gridsize,blocksize>>>(d_input, d_output, d_kernel, img_width, img_height, kernel_width, kernel_height);
    cudaMemcpy(output, d_output, img_width*img_height*sizeof(float), cudaMemcpyDeviceToHost);

    for (int i = 0; i < img_width*img_height; i++)
    {
        printf("%d, ", (int)output[i]);
    }
    printf("\n\n");
}
Here is my result. I tested it with a 24*24 image and ran it 2 times, and I also wrote a simple function to compare the outputs.
And here is the result when I compare the outputs: there are 32 differences, at index 240, 241, ...
You have made a fairly common error in your program. When you create a grid of threads like this:
dim3 blocksize(16,16);
dim3 gridsize;
gridsize.x=(img_width+blocksize.x-1)/blocksize.x;
gridsize.y=(img_height+blocksize.y-1)/blocksize.y;
you are intentionally creating (usually) extra threads in each dimension, so as to fully cover the problem space (i.e. image size). There is nothing wrong with this.
However, it means we will be launching extra threads, which are outside the valid image dimensions. We must ensure that these threads do nothing. The usual approach is to add a thread check to the kernel, so that threads outside the valid image dimensions do nothing. Here's a modified kernel and fully worked example showing that change:
$ cat t1219.cu
#include <iostream>
#include <cstdlib>

const int iw = 1025;
const int ih = 1025;
const int rng = 10;

__global__ void image_convolution_kernel(float *input, float *out, float *kernelConv,
                                         int img_width, const int img_height,
                                         const int kernel_width, const int kernel_height)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;

    if ((x < img_width) && (y < img_height)){ // thread check
        float sum = 0;
        for (int j = 0; j < kernel_height; j++)
        {
            for (int i = 0; i < kernel_width; i++)
            {
                int dX = x + i - kernel_width / 2;
                int dY = y + j - kernel_height / 2;

                if (dX < 0)
                    dX = 0;
                if (dX >= img_width)
                    dX = img_width - 1;
                if (dY < 0)
                    dY = 0;
                if (dY >= img_height)
                    dY = img_height - 1;

                const int idMat = j * kernel_width + i;
                const int idPixel = dY * img_width + dX;
                sum += (float)input[idPixel] * kernelConv[idMat];
            }
        }

        const int idOut = y * img_width + x;
        out[idOut] = abs(sum);
    }
}

void image_convolution(float * input, float* output, int img_height, int img_width)
{
    int kernel_height = 3;
    int kernel_width = 3;
    float kernel[] = { 0, -0.25, 0,
                       -0.25, 1, -0.25,
                       0, -0.25, 0 };
    float * mask = new float[kernel_height*kernel_width];
    for (int i = 0; i < kernel_height*kernel_width; i++)
    {
        mask[i] = kernel[i];
    }

    float * d_input, * d_output, * d_kernel;
    cudaMalloc(&d_input, img_width*img_height*sizeof(float));
    cudaMalloc(&d_output, img_width*img_height*sizeof(float));
    cudaMalloc(&d_kernel, kernel_height*kernel_width*sizeof(float));

    cudaMemcpy(d_input, input, img_width*img_height*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_kernel, mask, kernel_height*kernel_width*sizeof(float), cudaMemcpyHostToDevice);

    dim3 blocksize(16,16);
    dim3 gridsize;
    gridsize.x = (img_width + blocksize.x - 1) / blocksize.x;
    gridsize.y = (img_height + blocksize.y - 1) / blocksize.y;

    image_convolution_kernel<<<gridsize,blocksize>>>(d_input, d_output, d_kernel, img_width, img_height, kernel_width, kernel_height);
    cudaMemcpy(output, d_output, img_width*img_height*sizeof(float), cudaMemcpyDeviceToHost);
}

int main(){

    float *in, *out;
    int is = ih*iw;
    in = new float[is];
    out = new float[is];
    for (int i = 0; i < is; i++) { in[i] = rand()%rng; out[i] = -1; }
    image_convolution(in, out, ih, iw);
    for (int iy = 1; iy < ih-1; iy++)
        for (int ix = 1; ix < iw-1; ix++){
            float temp = abs(-0.25 * (in[iy*iw + ix -1] + in[iy*iw + ix +1] + in[(iy-1)*iw + ix] + in[(iy+1)*iw + ix]) + in[iy*iw+ix]);
            if (out[iy*iw+ix] != temp) { std::cout << "mismatch x: " << ix << " y: " << iy << " was: " << out[iy*iw+ix] << " should be: " << temp << std::endl; return 1; }
        }
    return 0;
}
$ nvcc -o t1219 t1219.cu
$ cuda-memcheck ./t1219
========= CUDA-MEMCHECK
========= ERROR SUMMARY: 0 errors
$
For image dimensions which are exact multiples of the block size (16,16) (which was true for my previous test case) this problem won't show up -- the code will work correctly. For all other test cases, we need such a thread check.
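To make the arithmetic concrete for the 24*24 case reported in the question (these numbers are my own working, not from the original posts):

// 24x24 image, 16x16 blocks, same rounded-up grid computation as above
int img_width = 24, img_height = 24;
int bx = 16, by = 16;
int gx = (img_width  + bx - 1) / bx;      // = 2 blocks in x
int gy = (img_height + by - 1) / by;      // = 2 blocks in y
int launched = (gx * bx) * (gy * by);     // = 1024 threads launched
int needed   = img_width * img_height;    // =  576 pixels in the image
// The 448 extra threads still compute idOut = y * img_width + x, and many of
// those indices land on valid pixels of other rows (e.g. x = 24, y = 9 writes
// to index 240), so without the thread check those pixels are written twice
// and the result depends on which write lands last.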
I am trying to write a small piece of code that generates numbers and returns the result in an array, but when I run it, it doesn't work. I tried to use the Nsight debugger to understand where the problem is, but it freezes and closes immediately.
Could you please help me understand where the problem in this code is?
__global__ void mykernel( int* PF_tmp, int* PL_tmp, int* QF_tmp, int* QL_tmp,
                          int m[2], int p[5], int q[5], int i, int* n,
                          int out[10][5], int N)
{
    int id = blockDim.x * blockIdx.x + threadIdx.x;
    int idx = blockIdx.x;
    int idy = blockIdx.y;
    int w = idx / 100;
    int x = idx % 100;
    int y = idy;
    int z = threadIdx.x;
    int len = ((i * 2) + 5);

    // Fill PF_tmp & QF_tmp
    if( i > 0){
        for(int k = 0; k < (i * 2); k++)
        {
            p[k] = PF_tmp[k];
            q[k] = QF_tmp[k];
        }
    }

    // Fill X
    if( x > 10)
    {
        p[(i*2)] = (x - (x % 10)) / 10;
        p[(i*2)+1] = x % 10;
    }else{
        p[(i*2)] = 0;
        p[(i*2)+1] = x;
    }

    // Fill Y
    if( y > 10)
    {
        q[(i*2)] = (y - (y % 10)) / 10;
        q[(i*2)+1] = y % 10;
    }else{
        q[(i*2)] = 0;
        q[(i*2)+1] = y;
    }

    // Fill m
    p[(i * 2)+2] = m[0];
    q[(i * 2)+2] = m[1];

    // Fill W
    if( w > 10)
    {
        p[(i*2)+3] = (w - (w % 10)) / 10;
        p[(i*2)+4] = w % 10;
    }else{
        p[(i*2)+3] = 0;
        p[(i*2)+4] = w;
    }

    // Fill Z
    if( z > 10)
    {
        q[(i*2)+3] = (z - (z % 10)) / 10;
        q[(i*2)+4] = z % 10;
    }else{
        q[(i*2)+3] = 0;
        q[(i*2)+4] = z;
    }

    // Fill PL_tmp & QL_tmp
    if( i > 0)
    {
        for(int k = 0; k < (i * 2); k++)
        {
            p[(len-(i * 2))+k] = PL_tmp[k];
            q[(len-(i * 2))+k] = QL_tmp[k];
        }
    }

    if(id < 10)
    {
        for(int k = 0; k < 5; k++)
            out[id][k] = p[k];
    }
}
int main()
{
    cudaError err;
    dim3 blocks(10000, 100);
    dim3 threads(100);

    int m[2] = {4,5};
    int hst_out[10][5];
    int p[5];
    int q[5];

    err = cudaMalloc((void **)&p, 5);
    err = cudaMalloc((void **)&q, 5);
    err = cudaMalloc((void **)&hst_out, 50);

    mykernel<<<blocks, threads>>>(NULL, NULL, NULL, NULL, m, p, q, 0, NULL, hst_out, 100000000);

    return 0;
}
The error is fairly obvious; it is all basic C programming.
When you declare
int m[2] = {4,5};
int hst_out[10][5];
int p[5];
int q[5];
then hst_out, p and q are not pointers, but later they are used as pointers:
err = cudaMalloc((void **)&p, 5);
err = cudaMalloc((void **)&q, 5);
err = cudaMalloc((void **)&hst_out, 50);
so you should have declared them as pointers instead, e.g.,
int *p;
and used them this way:
err = cudaMalloc((void **)&p, 5*sizeof(int));
Notice too that the size you allocated is just 5 bytes, whereas I declared it as 5*sizeof(int).
For more examples see:
http://cuda-programming.blogspot.sg/2013/03/how-to-avoid-uses-of-cudamalloc-in.html
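Putting it together, here is a minimal sketch of what the corrected host code could look like. The d_ names are mine, m is also moved to a device buffer (a host array cannot be dereferenced in device code), and the arguments that were NULL in the original stay NULL:

int main()
{
    dim3 blocks(10000, 100);
    dim3 threads(100);

    int m[2] = {4, 5};
    int hst_out[10][5];

    // device buffers are declared as pointers and sized in ints, not bytes
    int *d_m, *d_p, *d_q;
    int (*d_out)[5];
    cudaMalloc((void **)&d_m,   2 * sizeof(int));
    cudaMalloc((void **)&d_p,   5 * sizeof(int));
    cudaMalloc((void **)&d_q,   5 * sizeof(int));
    cudaMalloc((void **)&d_out, 10 * 5 * sizeof(int));

    cudaMemcpy(d_m, m, 2 * sizeof(int), cudaMemcpyHostToDevice);

    mykernel<<<blocks, threads>>>(NULL, NULL, NULL, NULL, d_m, d_p, d_q, 0, NULL, d_out, 100000000);

    // copy the result back so hst_out actually receives the kernel output
    cudaMemcpy(hst_out, d_out, 10 * 5 * sizeof(int), cudaMemcpyDeviceToHost);
    return 0;
}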
I'm trying to shake the whole screen.
Previously I was using the code below:
Shaky3D *shake = Shaky3D::create(0.2f, Size(1,1), 10, false);
this->runAction(Sequence::create(shake, NULL));
But now I'm using Cocos2d-x 3.2, and I tried the following, but it's not working. How should I code it correctly? Thanks.
NodeGrid* nodeGrid = NodeGrid::create();
this->addChild(nodeGrid);
auto shake = Shaky3D::create(0.2f, Size(1,1), 20, false);
nodeGrid->runAction(Sequence::create(shake, NULL));
Well, I found another way of doing it.
void GameScene::shakeScreen(float dt)
{
    float randx = rangeRandom( -50.0f, 50.0f );
    float randy = rangeRandom( -50.0f, 50.0f );
    this->setPosition(Point(initialPos.x + randx, initialPos.y + randy));

    SET_SHAKE_DURATION -= 1;
    if (SET_SHAKE_DURATION <= 0)
    {
        this->setPosition(Point(initialPos.x, initialPos.y));
        this->unschedule(schedule_selector(GameScene::shakeScreen));
    }
}

float GameScene::rangeRandom( float min, float max )
{
    float rnd = ((float)rand()/(float)RAND_MAX);
    return rnd*(max-min)+min;
}
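To start the shake, one would record the layer's resting position and schedule the selector. A minimal sketch, assuming initialPos is a Point member and SET_SHAKE_DURATION names a mutable tick counter (both appear in the code above, but their setup isn't shown):

void GameScene::startShake()
{
    initialPos = this->getPosition();   // remember where the layer normally sits
    SET_SHAKE_DURATION = 30;            // shake for roughly 30 scheduled ticks
    // run shakeScreen every frame until the counter inside it reaches zero
    this->schedule(schedule_selector(GameScene::shakeScreen));
}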
Well, here is an even better one (adapted from Unity):
float noise(int x, int y) {
    int n = x + y * 57;
    n = (n << 13) ^ n;
    return (1.0 - ((n * ((n * n * 15731) + 789221) + 1376312589) & 0x7fffffff) / 1073741824.0);
}
bool TestScene::onTouchBegan(cocos2d::Touch *touch, cocos2d::Event *event) {
    // experiment more with these four values to get a rough or smooth effect!
    float interval = 0.f;
    float duration = 0.5f;
    float speed = 2.0f;
    float magnitude = 1.0f;
    static float elapsed = 0.f;

    this->schedule([=](float dt) {
        float randomStart = random(-1000.0f, 1000.0f);
        elapsed += dt;

        float percentComplete = elapsed / duration;

        // We want to reduce the shake from full power to 0 starting half way through
        float damper = 1.0f - clampf(2.0f * percentComplete - 1.0f, 0.0f, 1.0f);

        // Calculate the noise parameter starting randomly and going as fast as speed allows
        float alpha = randomStart + speed * percentComplete;

        // map noise to [-1, 1]
        float x = noise(alpha, 0.0f) * 2.0f - 1.0f;
        float y = noise(0.0f, alpha) * 2.0f - 1.0f;

        x *= magnitude * damper;
        y *= magnitude * damper;

        this->setPosition(x, y);

        if (elapsed >= duration)
        {
            elapsed = 0;
            this->unschedule("Shake");
            this->setPosition(Vec2::ZERO);
        }
    }, interval, CC_REPEAT_FOREVER, 0.f, "Shake");

    return true;
}
I have a cpp file where I am creating an image and storing the data through the myOutput pointer:
int Rows = 80;
int Cols = 64;

for (int i = 0; i < Rows; i++ ){
    for (int j = 0; j < Cols; j++ )
    {
        X = 1.0f * ((float) i - (float) Rows / 2) / (float) Rows;
        Y = 2.0f * ((float) j - (float) Cols / 2) / (float) Cols;
        .....
        myOutput->Re = cosf( ......);
        myOutput->Im = sinf(.......);
        ++myOutput;
    }
}
Then, in CUDA, I am reading it like this:
int bx = blockIdx.x , by = blockIdx.y;
int tx = threadIdx.x , ty = threadIdx.y;
int RowIdx = ty + by * TILE_WIDTH;
int ColIdx = tx + bx * TILE_WIDTH;
Index = RowIdx * Cols + ColIdx;
//copy input data to shared memory
myshared[ty+1][tx+1] = *( devInputArray + Index );
(So, the myOutput generated from the cpp code is loaded into devInputArray.)
Now, I want to process many images simultaneously.
So, in the cpp code, the following additions must be made (for 2 images, for example):
int ImagesNb = 2;

for ( ImagesIdx = 0; ImagesIdx < ImagesNb; ImagesIdx++ ){
    for (int i = 0; i < Rows; i++ ){
        for (int j = 0; j < Cols; j++ )
        {
            X = (ImagesIdx + 1) * 1.0f * ((float) i - (float) Rows / 2) / (float) Rows;
            Y = (ImagesIdx + 1) * 2.0f * ((float) j - (float) Cols / 2) / (float) Cols;
            ...
But now I am not sure how to read the data in CUDA.
I don't know how to take the number of images into account.
Before, I had a pointer which contained the data (80 x 64).
Now it still contains the same dimensions for every image, but with more data.
I must change this:
Index = RowIdx * Cols + ColIdx;
//copy input data to shared memory
myshared[ty+1][tx+1] = *( devInputArray + Index );
but I can't figure out how!
I hope it is clear!
UPDATED
I am trying something like this:
int bx = blockIdx.x , by = blockIdx.y , bz = blockIdx.z;
int tx = threadIdx.x , ty = threadIdx.y , tz = threadIdx.z;
int RowIdx = ty + by * TILE_WIDTH;
int ColIdx = tx + bx * TILE_WIDTH;
int ImagesIdx = tz + bz * blockDim.z;
Index = RowIdx * Cols + ColIdx + Rows * Cols * ImagesIdx;
and:
dim3 dimGrid( ImagesNb * (Cols / TILE_WIDTH) , ImagesNb * (Rows / TILE_WIDTH) , ImagesNb);
dim3 dimBlock( TILE_WIDTH , TILE_WIDTH , 2);
but if I try it with 2 images I am not getting the right results.
OK, to use a number of images you must add an extra dimension to the shared variable in order to hold the number of images; a sketch of that indexing follows.
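As a hedged sketch of that idea (TILE_WIDTH, the +1 halo, Rows, Cols and devInputArray come from the question; the kernel name, the output copy and the bounds guard are mine, just to make the indexing visible):

#define TILE_WIDTH 16

// Each block handles a TILE_WIDTH x TILE_WIDTH tile of blockDim.z images at once,
// so the shared array gets one extra (slowest-varying) dimension for the image.
__global__ void loadTiles(const float *devInputArray, float *devOutputArray,
                          int Rows, int Cols, int ImagesNb)
{
    __shared__ float myshared[2][TILE_WIDTH + 2][TILE_WIDTH + 2];  // blockDim.z == 2 assumed

    int RowIdx    = threadIdx.y + blockIdx.y * TILE_WIDTH;
    int ColIdx    = threadIdx.x + blockIdx.x * TILE_WIDTH;
    int ImagesIdx = threadIdx.z + blockIdx.z * blockDim.z;   // which image this thread works on

    // images are stored back to back, so each one is a contiguous Rows*Cols slab
    int Index = ImagesIdx * Rows * Cols + RowIdx * Cols + ColIdx;

    bool inside = (RowIdx < Rows) && (ColIdx < Cols) && (ImagesIdx < ImagesNb);
    if (inside)
        myshared[threadIdx.z][threadIdx.y + 1][threadIdx.x + 1] = devInputArray[Index];
    __syncthreads();

    if (inside)   // placeholder for the real per-pixel work
        devOutputArray[Index] = myshared[threadIdx.z][threadIdx.y + 1][threadIdx.x + 1];
}

// launched, for example, as:
//   dim3 dimBlock(TILE_WIDTH, TILE_WIDTH, 2);
//   dim3 dimGrid(Cols / TILE_WIDTH, Rows / TILE_WIDTH, (ImagesNb + 1) / 2);

Note that only the z dimension of the launch should scale with the number of images; the x and y grid dimensions stay Cols / TILE_WIDTH and Rows / TILE_WIDTH, exactly as for a single image.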
Hi, I am trying to create an identity matrix with CUDA, but the output is just zeros:
__global__ void initIdentityGPU(int *devMatrix, int numR, int numC) {
    int x = blockIdx.x;
    int y = blockIdx.y;
    int offset = x * y;

    for (int i = 0; i < x; i++) {
        for (int j = 0; j < numR; j++) {
            if (i == j)
                devMatrix[offset] = 1;
            else
                devMatrix[offset] = 0;
        }
    }
}
Why does it only put 0s?
The simplest way to do it is:
__global__ void initIdentityGPU(int **devMatrix, int numR, int numC) {
    int x = blockDim.x*blockIdx.x + threadIdx.x;
    int y = blockDim.y*blockIdx.y + threadIdx.y;

    if (y < numR && x < numC) {
        if (x == y)
            devMatrix[y][x] = 1;
        else
            devMatrix[y][x] = 0;
    }
}
and you launch it as:
dim3 blockDim(BLOCK_DIM_X, BLOCK_DIM_Y);
dim3 gridDim((numC + BLOCK_DIM_X - 1) / BLOCK_DIM_X, (numR + BLOCK_DIM_Y - 1) / BLOCK_DIM_Y);
initIdentityGPU<<<gridDim, blockDim>>>(matrix, numR, numC);
It simply runs as many threads as there are matrix cells; each thread obtains the coordinates of its cell, and if the cell is on the diagonal of the matrix it assigns 1, otherwise 0. Note the code is untested.
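Since devMatrix is an int**, the row pointers themselves have to live in device memory; a minimal host-side sketch of one way to set that up (matrix size, block size and variable names here are illustrative):

#include <vector>

int numR = 1024, numC = 1024;

int *devData;      // contiguous storage for all numR*numC cells
int **devMatrix;   // device-side array of row pointers, as the kernel expects
cudaMalloc(&devData,   numR * numC * sizeof(int));
cudaMalloc(&devMatrix, numR * sizeof(int *));

// build the row-pointer table on the host, pointing into devData, then copy it over
std::vector<int *> rows(numR);
for (int i = 0; i < numR; i++)
    rows[i] = devData + i * numC;
cudaMemcpy(devMatrix, rows.data(), numR * sizeof(int *), cudaMemcpyHostToDevice);

dim3 blockDim(16, 16);
dim3 gridDim((numC + blockDim.x - 1) / blockDim.x, (numR + blockDim.y - 1) / blockDim.y);
initIdentityGPU<<<gridDim, blockDim>>>(devMatrix, numR, numC);

// the whole matrix can then be copied back from the contiguous buffer:
// cudaMemcpy(hostMatrix, devData, numR * numC * sizeof(int), cudaMemcpyDeviceToHost);

A flat layout indexed as devMatrix[y * numC + x] with a single allocation is usually simpler; the sketch above just matches the double-pointer signature used in the kernel.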