I wrote a generic matrix class for all of my 3d objects, but the translations seem to be really off.
Here is my class:
class GenericMatrix
{
public:
    GenericMatrix();

    virtual void Identity();

    virtual void Scale( float x, float y, float z );
    virtual void Scale( const DirectX::XMFLOAT3& s );
    virtual DirectX::XMFLOAT3 Scaling( void );

    virtual void Translate( float x, float y, float z );
    virtual void Translate( const DirectX::XMFLOAT3& t );
    virtual DirectX::XMFLOAT3 Translations( void );

    virtual void Rotate( float x, float y, float z );
    virtual void Rotate( const DirectX::XMFLOAT3& r );
    virtual DirectX::XMVECTOR Rotations( void );
    virtual DirectX::XMFLOAT3 RotationsPYR( void );

    virtual DirectX::XMMATRIX Matrix( bool process = true );
    virtual operator DirectX::XMMATRIX()
    {
        return this->Matrix();
    }

    virtual void UpdateMatrix( void );

    DirectX::XMMATRIX matrix;
    DirectX::XMFLOAT3 scale;
    DirectX::XMFLOAT3 translation;
    DirectX::XMVECTOR rotation;
    bool matrixNeedsUpdate;

protected:
    float yaw, pitch, roll;
};
And the source:
#include "pch.h"
#include "GenericMatrix.h"
GenericMatrix::GenericMatrix()
: matrixNeedsUpdate( true )
{
this->Identity();
this->pitch = 90.0f;
this->yaw = 0.0f;
this->roll = 0.0f;
}
void GenericMatrix::Identity()
{
this->scale = DirectX::XMFLOAT3( 1.0f, 1.0f, 1.0f );
this->translation = DirectX::XMFLOAT3( 0.0f, 0.0f, 0.0f );
this->rotation = DirectX::XMQuaternionIdentity();
this->pitch = 90.0f;
this->yaw = 0.0f;
this->roll = 0.0f;
matrixNeedsUpdate = true;
}
void GenericMatrix::Scale( float x, float y, float z )
{
this->scale.x += x;
this->scale.y += y;
this->scale.z += z;
this->matrixNeedsUpdate = true;
}
void GenericMatrix::Scale( const DirectX::XMFLOAT3& s )
{ Scale( s.x, s.y, s.z ); }
DirectX::XMFLOAT3 GenericMatrix::Scaling( void )
{ return this->scale; }
void GenericMatrix::Translate( float x, float y, float z )
{
this->translation.x += x;
this->translation.y += y;
this->translation.z += z;
this->matrixNeedsUpdate = true;
}
void GenericMatrix::Translate( const DirectX::XMFLOAT3& t )
{ Translate( t.x, t.y, t.z ); }
DirectX::XMFLOAT3 GenericMatrix::Translations( void )
{ return this->translation; }
void GenericMatrix::Rotate( float x, float y, float z )
{
pitch = x;
yaw = y;
roll = z;
this->rotation = DirectX::XMQuaternionRotationRollPitchYaw( pitch, yaw, roll );
this->matrixNeedsUpdate = true;
}
void GenericMatrix::Rotate( const DirectX::XMFLOAT3& r )
{ Rotate( r.x, r.y, r.z ); }
DirectX::XMVECTOR GenericMatrix::Rotations( void )
{ return this->rotation; }
DirectX::XMFLOAT3 GenericMatrix::RotationsPYR( void )
{
return DirectX::XMFLOAT3( pitch, yaw, roll );
}
DirectX::XMMATRIX GenericMatrix::Matrix( bool process )
{
if ( process && this->matrixNeedsUpdate )
{
UpdateMatrix();
this->matrixNeedsUpdate = false;
}
return this->matrix;
}
void GenericMatrix::UpdateMatrix( void )
{
DirectX::XMVECTOR scaleVec, translateVec;
scaleVec = DirectX::XMLoadFloat3( &this->scale );
translateVec = DirectX::XMLoadFloat3( &this->translation );
this->matrix = DirectX::XMMatrixScalingFromVector( scaleVec ) * DirectX::XMMatrixRotationQuaternion( this->rotation ) * DirectX::XMMatrixTranslationFromVector( translateVec );
}
When I use this as a model matrix, after doing Identity() and then Translate( 0.0f, 0.0f, 5.0f ), my model is only visible when I position my camera at [0,0,5], and even then it's just a flicker. The last bit of 3D programming I did was with OpenGL 1.x, so there is a good chance I'm doing something weird with the matrices, but I haven't found anything that tells me how to do this otherwise. If anyone needs more information, just ask.
Update: some more details. If I set the matrix to Identity() and leave it, I can move the camera and see the model without any problem from any position; but as soon as I translate it anywhere, the visibility breaks.
I discovered the problem after a few tests. I was using a left-handed coordinate system for my camera, but a right-handed one for my perspective projection. Between z = -0.9 and 0.9, both the left- and right-handed systems agreed that the model should be visible, but once I moved it out of that range, the two matrices could not agree on what was visible unless my camera sat at exactly the same coordinates.
Moral of the story - remember to consistently use the same coordinate system throughout your code.
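For the record, here's a minimal sketch of what "consistent" looks like in DirectXMath (the eye/target/FOV numbers are just placeholders, not values from my code): both matrices come from the left-handed *LH family. Mixing XMMatrixLookAtLH with XMMatrixPerspectiveFovRH, or vice versa, reproduces exactly the flicker I described.

    using namespace DirectX;

    // Both matrices use the left-handed convention; swap *both* to the
    // *RH variants if the rest of your pipeline is right-handed.
    XMMATRIX view = XMMatrixLookAtLH(
        XMVectorSet( 0.0f, 0.0f, -5.0f, 1.0f ),   // eye (placeholder)
        XMVectorSet( 0.0f, 0.0f,  0.0f, 1.0f ),   // target
        XMVectorSet( 0.0f, 1.0f,  0.0f, 0.0f ) ); // up
    XMMATRIX proj = XMMatrixPerspectiveFovLH(
        XM_PIDIV4,          // vertical field of view
        800.0f / 600.0f,    // aspect ratio (placeholder)
        0.1f, 100.0f );     // near and far planes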
I have searched everywhere but found no reference.
I want to use a shader from Shadertoy in my libgdx project, so I tried importing a simple shader first, from: https://www.shadertoy.com/view/XsffRs
I modified it a bit, like this, but had no success:
/*
 * Original shader from: https://www.shadertoy.com/view/XsffRs
 */
#ifdef GL_ES
precision mediump float;
#endif

uniform float time;
uniform vec2 resolution;

// shadertoy emulation
#define iTime time
#define iResolution resolution

// --------[ Original ShaderToy begins here ]---------- //
#define TAU 6.28318531

float C,S;

mat2 rot(float a){
    return mat2(C=cos(a),S=sin(a),-S,C);
}

float map(vec3 p) {
    p.yz*=rot(p.z*(.03*sin(iTime*3.)));
    p.xz*=rot(p.z*(.03*cos(iTime*3.)));
    float m=TAU/6.,
          l=length(p.xy),
          a=mod(atan(p.y,p.x)-p.z*.5+iTime*5.,m)-.5*m;
    return length(vec2(a*l,l-2.))-.8;
}

void main(void)
{
    vec2 uv = gl_FragCoord.xy / iResolution.xy;
    uv-=.5;
    uv.x*=iResolution.x/iResolution.y;
    vec3 ro=vec3(uv,-3.),rd=normalize(vec3(uv,1.)),mp=ro;
    float i=0.;
    for (int ii=0;ii<30;++ii) {
        i++;
        float md=map(mp);
        if (abs(md)<.001)break;
        mp+=rd*md;
    }
    float r=i/30.;
    float d=length(mp-ro)*.1;
    vec3 c=mix(vec3(.2,.5,.7)*d*d,vec3(.2,.4,.8)*r/d,r*r);
    c=sqrt(c);
    gl_FragColor = vec4(c,1.);
}
Code for the ShaderProgram:
public void create () {
    width = Gdx.graphics.getWidth();
    height = Gdx.graphics.getHeight();
    batch = new SpriteBatch();
    ShaderProgram.pedantic = false;
    shader = new ShaderProgram(Gdx.files.internal("vert.vert"), Gdx.files.internal("frag.frag"));
    if (!shader.isCompiled())
        Gdx.app.log("Shader", shader.getLog()); // actually report the compile log
}
public void render () {
    time += Gdx.graphics.getDeltaTime();
    shader.begin();
    shader.setUniformf("resolution", new Vector2(width, height));
    shader.setUniformf("time", time);
    shader.end();
    batch.begin();
    batch.setShader(shader);
    batch.end();
}
The shader compiles and runs without errors, but I get a black screen.
Edit: it works if I draw a dummy texture
    Texture t = new Texture(new Pixmap(width, height, Pixmap.Format.RGB565));
with the SpriteBatch, but I don't know why the dummy texture is required.
In order to see the shader in action you need to draw something. Your code only specifies that the shader is going to be used; nothing is actually drawn with it:
batch.begin();
batch.setShader(shader);
batch.draw(new Texture(new Pixmap(width, height, Pixmap.Format.RGB565)), 0, 0);
batch.end();
(In real code, create the texture once and reuse it; a new Texture allocated every frame is never disposed and will leak memory.)
I have a struct Cap which contains a thrust::device_vector of another structure. When I compile the code, I get an error complaining that a host function (thrust::device_vector<FloatIntPair>) is called from a device function (SphericalFaceManager::makeCaps). When I add __host__ __device__ instead of only __device__ to the member functions and constructors, the code compiles, but I get a warning with the same message as the error, and I think it copies data between host and device. My question is: how can I access device vectors in my classes while avoiding any data transfer between the CPU and GPU?
Here is the code:
struct ParticleID {
    Int solver;
    Int ngb;
    Int oldNgb;
    LLInt no;
    LLInt masterNo;

    __device__ ParticleID() {
        solver = -8;
        ngb = 0;
        oldNgb = 0;
        no = 0;
        masterNo = -1;
    }
};

struct BaseParticle {
    Float h;
    Float3 pos;
    ParticleID id;

    __device__ BaseParticle(const Float3& _pos, const Float& _h, const ParticleID& _id) :
        h(_h), pos(_pos), id(_id) { }
};

struct FloatIntPair {
    Float first;
    Int second;

    __device__ FloatIntPair(const Float& _first, Int _second) : first(_first), second(_second) { }
    __device__ FloatIntPair(const FloatIntPair& sample) : first(sample.first), second(sample.second) { }

    static struct {
        __device__ bool operator()(const FloatIntPair& a, const FloatIntPair& b) { return a.first < b.first; }
    } LessOp;
};

struct Cap {
    Float3 eX;
    Float3 eY;
    Float radius;
    Float height;
    Float3 center;
    Float3 normal;
    BaseParticle* aP;
    BaseParticle* bP;
    thrust::device_vector<FloatIntPair> vertices; // The ordered list of vertices generated from intersections by other circles

    __device__ inline Float findAngle(const Float3& vertex) const {
        Float result;
        Float3 r = (vertex - center);
        result = atan2(r|eY, r|eX);
        return result += (result < 0.0) * (2.0 * _PI);
    }

    __device__ void insertVertex(const Float3& vertex, Int id) {
        Float theta;
        if (!vertices.empty())
            theta = findAngle(vertex);
        else {
            eX = normalVec(vertex - center);
            eY = normal ^ eX;
            theta = 0.0;
        }
        vertices.push_back(FloatIntPair(theta, id));
    }

    __device__ Cap(BaseParticle* _aP, BaseParticle* _bP) : aP(_aP), bP(_bP) {
        // Compute normal, center, radius
        Float d = mag(bP->pos - aP->pos);
        if (d == 0.0) {
            normal = Vector1(0.0);
            center = aP->pos;
            radius = height = 0.0;
        } else {
            normal = (bP->pos - aP->pos) / d;
            Float x = (d * d - bP->h * bP->h + aP->h * aP->h) / (2.0 * d);
            center = aP->pos + normal * x;
            if (x >= aP->h) {
                radius = height = 0.0;
                return;
            }
            radius = sqrt(aP->h * aP->h - x * x);
            height = min(2.0 * aP->h, aP->h - x);
            Float3 vec001 = Vector(0.0, 0.0, 1.0);
            Float3 vec011 = Vector(0.0, 1.0, 1.0);
            eX = normalVec(vec001 ^ normal);
            if (mag2(eX) < geoEps()) {
                eX = normalVec(vec011 ^ normal);
            }
            eY = normal ^ eX;
        }
    }
};

class SphericalFaceManager {
    BaseParticle* particle;
    Int baseSigma;
public:
    thrust::device_vector<Cap> caps;
    thrust::device_vector<Float3> vertexPool;

    __device__ void makeCaps();
};

__device__ void SphericalFaceManager::makeCaps() {
    BaseParticle* aP;
    BaseParticle* bP;
    Cap aCap(aP, bP);
}
You cannot use thrust vectors (or std::vector) directly in device code. This is mentioned in various other SO questions, such as here.
If you want to use the data in a thrust::device_vector in device code, you should pass a raw pointer to that data as a functor initialization parameter. Various other SO questions give examples of this, such as here.
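For illustration, a minimal sketch (the ScaleBy functor and its names are invented for this example, not taken from your code):

#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/iterator/counting_iterator.h>

// The functor stores a raw device pointer, captured on the host at
// construction time; device code only ever dereferences the pointer,
// never touches the device_vector itself.
struct ScaleBy
{
    const float* data;
    float factor;
    ScaleBy(const float* d, float f) : data(d), factor(f) {}
    __device__ float operator()(int i) const { return data[i] * factor; }
};

int main()
{
    thrust::device_vector<float> v(100, 2.0f);
    thrust::device_vector<float> out(100);
    thrust::transform(thrust::make_counting_iterator(0),
                      thrust::make_counting_iterator(100),
                      out.begin(),
                      ScaleBy(thrust::raw_pointer_cast(v.data()), 3.0f));
    return 0;
}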
Likewise, you cannot use vector methods, e.g. .empty() or .push_back(), in device code.
You will need to replace these with ordinary C-style allocation and C-style indexed data access.
For a multi-threaded implementation of push_back in device code, I would recommend something like this. That is a fully worked example that demonstrates how to allocate space for the vector and how each thread can use it, for insertVertex for example.
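As a rough sketch of that C-style replacement (illustrative only, not a drop-in for your Cap struct): preallocate a buffer with a fixed capacity and let each thread reserve a slot with atomicAdd:

#include <thrust/device_vector.h>
#include <cstdio>

// Each thread "pushes back" by atomically reserving the next slot in a
// preallocated buffer; writes past the capacity are simply dropped here
// (a real implementation would report overflow instead).
__global__ void appendSquares(float* out, int* count, int capacity)
{
    float val = threadIdx.x * threadIdx.x;
    int slot = atomicAdd(count, 1);   // reserve a slot
    if (slot < capacity)
        out[slot] = val;              // C-style indexed write
}

int main()
{
    const int capacity = 64;
    thrust::device_vector<float> buf(capacity);
    thrust::device_vector<int> count(1, 0);

    // Only raw pointers cross into device code, never the vectors.
    appendSquares<<<1, 32>>>(thrust::raw_pointer_cast(buf.data()),
                             thrust::raw_pointer_cast(count.data()),
                             capacity);
    cudaDeviceSynchronize();
    printf("appended %d values\n", (int)count[0]);
    return 0;
}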
I want to use texture objects (not references) with doubles. The code below works when using floats, but double is not a supported data type.
Can I get around this using 2D textures, and if so, how do I set up such a texture?
There is a similar question for texture references, but none for texture objects: Support for double type in texture memory in CUDA
__global__ void my_print(cudaTextureObject_t texObject)
{
    printf("%f\n", tex1Dfetch<double>(texObject, 0));
    return;
}

int main()
{
    double i = 0.35;
    int numel = 50;

    double* d_data;
    cudaMalloc(&d_data, numel*sizeof(double));
    cudaMemcpy((void*)d_data, &i, 1*sizeof(double), cudaMemcpyHostToDevice);

    cudaTextureDesc td;
    memset(&td, 0, sizeof(td));
    td.normalizedCoords = 0;
    td.addressMode[0] = cudaAddressModeClamp;
    td.readMode = cudaReadModeElementType;

    struct cudaResourceDesc resDesc;
    memset(&resDesc, 0, sizeof(resDesc));
    resDesc.resType = cudaResourceTypeLinear;
    resDesc.res.linear.devPtr = d_data;
    resDesc.res.linear.sizeInBytes = numel*sizeof(double);
    resDesc.res.linear.desc.f = cudaChannelFormatKindFloat;
    resDesc.res.linear.desc.x = 32;

    cudaTextureObject_t texObject = 0;
    gpuErrchk(cudaCreateTextureObject(&texObject, &resDesc, &td, NULL));

    my_print<<<1,1>>>(texObject);
    gpuErrchk(cudaDeviceSynchronize());
    return 0;
}
The idea is exactly the same as for texture references. You can access double precision by binding the data to a supported 64 bit type and casting the resulting read to a double. If you modify your code like this:
#include <vector>
#include <cstdio>

static __inline__ __device__ double fetch_double(uint2 p){
    return __hiloint2double(p.y, p.x);
}

#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

__global__ void my_print(cudaTextureObject_t texObject)
{
    uint2 rval = tex1Dfetch<uint2>(texObject, 0);
    double dval = fetch_double(rval);
    printf("%f\n", dval);
}

int main()
{
    double i = 0.35;
    int numel = 50;

    std::vector<double> h_data(numel, i);
    double* d_data;
    cudaMalloc(&d_data, numel*sizeof(double));
    cudaMemcpy((void*)d_data, &h_data[0], numel*sizeof(double), cudaMemcpyHostToDevice);

    cudaTextureDesc td;
    memset(&td, 0, sizeof(td));
    td.normalizedCoords = 0;
    td.addressMode[0] = cudaAddressModeClamp;
    td.readMode = cudaReadModeElementType;

    struct cudaResourceDesc resDesc;
    memset(&resDesc, 0, sizeof(resDesc));
    resDesc.resType = cudaResourceTypeLinear;
    resDesc.res.linear.devPtr = d_data;
    resDesc.res.linear.sizeInBytes = numel*sizeof(double);
    resDesc.res.linear.desc.f = cudaChannelFormatKindUnsigned;
    resDesc.res.linear.desc.x = 32;
    resDesc.res.linear.desc.y = 32;

    cudaTextureObject_t texObject;
    gpuErrchk(cudaCreateTextureObject(&texObject, &resDesc, &td, NULL));

    my_print<<<1,1>>>(texObject);
    gpuErrchk(cudaDeviceSynchronize());

    return 0;
}
i.e. modify the channel description to 64 bits, read a uint2 from the texture object, and then cast it to a double, it should work as you want.
I have used atomicMax() to find the maximum value in the CUDA kernel:
__global__ void global_max(float* values, float* gl_max)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    float val = values[i];
    atomicMax(gl_max, val);
}
It is throwing the following error:
error: no instance of overloaded function "atomicMax" matches the argument list
The argument types are: (float *, float).
atomicMax is not available for float types, but you can implement it via atomicCAS. With this overload in scope, the original kernel compiles unchanged:
__device__ static float atomicMax(float* address, float val)
{
    int* address_as_i = (int*) address;
    int old = *address_as_i, assumed;
    do {
        assumed = old;
        old = ::atomicCAS(address_as_i, assumed,
                          __float_as_int(::fmaxf(val, __int_as_float(assumed))));
    } while (assumed != old);
    return __int_as_float(old);
}
Based on the CUDA Toolkit Documentation v9.2.148, there is no atomicMax or atomicMin for floats.
But we can implement them by mixing the integer atomicMax and atomicMin with signed and unsigned integer casts. The trick: for non-negative floats, the IEEE-754 bit pattern orders the same way as a signed integer, while for negative floats the ordering is reversed (and the sign bit makes them compare larger as unsigned integers), so each branch below delegates to the integer atomic whose ordering matches.
This is a float atomic min:
__device__ __forceinline__ float atomicMinFloat(float* addr, float value) {
    float old;
    old = (value >= 0) ? __int_as_float(atomicMin((int*)addr, __float_as_int(value))) :
                         __uint_as_float(atomicMax((unsigned int*)addr, __float_as_uint(value)));
    return old;
}
This is a float atomic max:
__device__ __forceinline__ float atomicMaxFloat(float* addr, float value) {
    float old;
    old = (value >= 0) ? __int_as_float(atomicMax((int*)addr, __float_as_int(value))) :
                         __uint_as_float(atomicMin((unsigned int*)addr, __float_as_uint(value)));
    return old;
}
You need to map the float to an "ordered int" to use the integer atomicMax:
__device__ __forceinline__ int floatToOrderedInt( float floatVal ) {
    int intVal = __float_as_int( floatVal );
    return (intVal >= 0) ? intVal : intVal ^ 0x7FFFFFFF;
}

__device__ __forceinline__ float orderedIntToFloat( int intVal ) {
    return __int_as_float( (intVal >= 0) ? intVal : intVal ^ 0x7FFFFFFF );
}
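For example, a sketch of how the mapping would be used (the initialization detail is an assumption on my part: the accumulator must start out holding the ordered-int encoding of the smallest float you expect):

__global__ void global_max_ordered(const float* values, int* gl_max_ordered)
{
    // gl_max_ordered is assumed pre-initialized (e.g. by a tiny init kernel)
    // with floatToOrderedInt(-FLT_MAX), so every real value beats it.
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    atomicMax(gl_max_ordered, floatToOrderedInt(values[i]));
}
// After the kernel, copy the int back and decode it with orderedIntToFloat
// (mark the helpers __host__ __device__ if you want to decode on the host).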
The short answer is that you can't. As you can see from the atomic function documentation, only integer arguments are supported for atomicMax, and 64 bit integer arguments are only supported on devices of compute capability 3.5 and higher.
I believe the answer given by Xiaojing An is a good solution, but there is a minor issue with negative zero, as mentioned by Robert Crovella in a comment. For example, if *addr = -1.0f and val = -0.0f, then after running atomicMaxFloat, *addr will be set to -1.0f, but it should be -0.0f; atomicMinFloat is wrong in the same case. This happens because the >= 0 check returns true for negative zero, but for this scheme to work it needs to be false. The fix is to use the signbit function instead:
__device__ __forceinline__ float atomicMinFloat(float* addr, float value) {
    float old;
    old = !signbit(value) ? __int_as_float(atomicMin((int*)addr, __float_as_int(value))) :
                            __uint_as_float(atomicMax((unsigned int*)addr, __float_as_uint(value)));
    return old;
}

__device__ __forceinline__ float atomicMaxFloat(float* addr, float value) {
    float old;
    old = !signbit(value) ? __int_as_float(atomicMax((int*)addr, __float_as_int(value))) :
                            __uint_as_float(atomicMin((unsigned int*)addr, __float_as_uint(value)));
    return old;
}
Note: I would have posted this as a comment on Xiaojing An's answer, but I don't have enough reputation.
Of course, it's unclear what will happen with NaNs or infs in these functions, but I think you can use them without worrying about that, assuming you don't need to handle those cases; negative zero is probably the only really worrying case. It also depends on your willingness to accept this kind of hackery, where we make assumptions about how floating point values are represented in binary; many people may prefer never to go down this kind of route.
Here's a small test program:
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <math.h>
/*
//these versions fail some of the tests involving negative 0
__device__ __forceinline__ float atomicMinFloat(float* addr, float value) {
float old;
old = value >= 0 ? __int_as_float(atomicMin((int*)addr, __float_as_int(value))) :
__uint_as_float(atomicMax((unsigned int*)addr, __float_as_uint(value)));
return old;
}
__device__ __forceinline__ float atomicMaxFloat(float* addr, float value) {
float old;
old = value >= 0 ? __int_as_float(atomicMax((int*)addr, __float_as_int(value))) :
__uint_as_float(atomicMin((unsigned int*)addr, __float_as_uint(value)));
return old;
}
*/
__device__ __forceinline__ float atomicMinFloat(float* addr, float value) {
float old;
old = !signbit(value) ? __int_as_float(atomicMin((int*)addr, __float_as_int(value))) :
__uint_as_float(atomicMax((unsigned int*)addr, __float_as_uint(value)));
return old;
}
__device__ __forceinline__ float atomicMaxFloat(float* addr, float value) {
float old;
old = !signbit(value) ? __int_as_float(atomicMax((int*)addr, __float_as_int(value))) :
__uint_as_float(atomicMin((unsigned int*)addr, __float_as_uint(value)));
return old;
}
__global__ void testKernel(float* testMaxData,
float* testMinData,
const float* testValues,
int numTests)
{
int index = blockDim.x * blockIdx.x + threadIdx.x;
if (index >= numTests)
{
return;
}
float val = testValues[index];
atomicMaxFloat(testMaxData + index, val);
atomicMinFloat(testMinData + index, val);
}
void checkCudaErr(cudaError_t cudaStatus)
{
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "CUDA Runtime error: %s\n", cudaGetErrorString(cudaStatus));
}
}
int main()
{
const int numValues = 6;
const int numTests = numValues * numValues;
float testData[numValues] = { 0.0f, -0.0f, 1.0f, -1.0f, 200.0f, -200.0f };
float testValuesMinMaxHost[numTests];
float testValuesHost[numTests];
for (int i = 0; i < numValues; ++i)
{
for (int j = 0; j < numValues; ++j)
{
/*
We will test the values of min(a,b) and max(a,b) for
all values of a and b in the testData array.
*/
testValuesMinMaxHost[numValues * i + j] = testData[i];
testValuesHost[numValues * i + j] = testData[j];
}
}
float* devTestMax = 0;
float* devTestMin = 0;
float* devTestValues = 0;
checkCudaErr(cudaSetDevice(0));
checkCudaErr(cudaMalloc((void**)&devTestMax, numTests * sizeof(float)));
checkCudaErr(cudaMalloc((void**)&devTestMin, numTests * sizeof(float)));
checkCudaErr(cudaMalloc((void**)&devTestValues, numTests * sizeof(float)));
checkCudaErr(cudaMemcpy(devTestMax, testValuesMinMaxHost, numTests * sizeof(float), cudaMemcpyHostToDevice));
checkCudaErr(cudaMemcpy(devTestMin, testValuesMinMaxHost, numTests * sizeof(float), cudaMemcpyHostToDevice));
checkCudaErr(cudaMemcpy(devTestValues, testValuesHost, numTests * sizeof(float), cudaMemcpyHostToDevice));
int blockSize = 128;
testKernel << < (numTests+(blockSize-1))/ blockSize, blockSize >> > (devTestMax, devTestMin, devTestValues, numTests);
checkCudaErr(cudaGetLastError());
float resultsMin[numTests];
float resultsMax[numTests];
checkCudaErr(cudaMemcpy(resultsMin, devTestMin, numTests * sizeof(float), cudaMemcpyDeviceToHost));
checkCudaErr(cudaMemcpy(resultsMax, devTestMax, numTests * sizeof(float), cudaMemcpyDeviceToHost));
checkCudaErr(cudaFree(devTestMax));
checkCudaErr(cudaFree(devTestMin));
checkCudaErr(cudaFree(devTestValues));
int fail = 0;
for (int i = 0; i < numTests; ++i)
{
float expectedMax = fmax(testValuesMinMaxHost[i], testValuesHost[i]);
if (resultsMax[i] != expectedMax)
{
printf("fail, expected %f, got %f from max(%f, %f)\n",
expectedMax,
resultsMax[i],
testValuesMinMaxHost[i],
testValuesHost[i]);
fail = 1;
}
float expectedMin = fmin(testValuesMinMaxHost[i], testValuesHost[i]);
if (resultsMin[i] != expectedMin)
{
printf("fail, expected %f, got %f from min(%f, %f)\n",
expectedMin,
resultsMin[i],
testValuesMinMaxHost[i],
testValuesHost[i]);
fail = 1;
}
}
if (fail == 0)
{
printf("all tests passed\n");
}
return 0;
}
This is the signature of the integer version of atomicMax:
int atomicMax(int* address, int val);
But there are exceptions, like atomicAdd, which supports floats.
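For example, this works as-is, with no workaround:

// atomicAdd has a native float overload on compute capability 2.0 and later.
__global__ void sum(const float* values, float* total)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    atomicAdd(total, values[i]);
}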
I have the following code, which draws a Mandelbrot set. I created a menu with an option "black&white", which should draw the Mandelbrot set in black and white. I haven't figured out how to do this (if it can be done this way). mandelbrot() is called through the display function, but how can I call mandelbrot_black()?
Also, if someone knows how to add "zoom" to my code: http://stackoverflow.com/questions/5705554/how-to-do-zoom-in-my-code-mandelbrot
void mandelbrot();
void mandelbrot_black();

GLsizei width = 600;
GLsizei height = 600;
GLfloat AspectRatio;
int max = 500;
double xpos = 0, ypos = 0;
int CLEARFLAG = 1;
double xmax = 2.0;
double xmin = -2.0;
double ymax = 2.0;
double ymin = -2.0;

using namespace std;

void display()
{
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    gluOrtho2D(-2, width, -2, height);
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    mandelbrot();
    glutSwapBuffers();
}

void reshape(GLsizei w, GLsizei h) {
    width = w; height = h;
    glViewport(0, 0, width, height);
    glutPostRedisplay();
}

void setXYpos(int px, int py)
{
    xpos = xmin + (xmax - xmin) * px / width;
    ypos = ymax - (ymax - ymin) * py / height;
}

void mouse(int button, int state, int x, int y)
{
    if (button == GLUT_LEFT_BUTTON && state == GLUT_DOWN) { CLEARFLAG = 0; setXYpos(x, y); }
    glutPostRedisplay();
}

void mandelbrot()
{
    ...
}

void mandelbrot_black()
{
    ...
}

void mymenu(int n)
{
    switch (n) {
    case 1: zoom_in(); break;
    case 2: zoom_out(); break;
    case 3: mandelbrot_black(); break;
    case 4: exit(0);
    }
    glutPostRedisplay();
}

void SetupMenu()
{
    glutCreateMenu(mymenu);
    glutAddMenuEntry("zoom in", 1);
    glutAddMenuEntry("zoom out", 2);
    glutAddMenuEntry("black&white", 3);
    glutAddMenuEntry("exit", 4);
    glutAttachMenu(GLUT_RIGHT_BUTTON);
}

int main(int argc, char *argv[])
{
    glutInit(&argc, argv);
    glutInitWindowSize(600, 600);
    glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGB);
    glutCreateWindow("Mandelbrot");
    glutDisplayFunc(display);
    glutReshapeFunc(reshape);
    glutMainLoop();
    return 0;
}
Your display function needs to draw either mandelbrot() or mandelbrot_black() depending on the current state (which can/should be a global variable).
//in global scope
static bool black = false;
...

//in display()
if (black)
    mandelbrot_black();
else
    mandelbrot();
Change black accordingly in mymenu(). You also still need to call SetupMenu() from main() (it already attaches the menu to the right mouse button) and register your mouse handler with glutMouseFunc(mouse).
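For example (a sketch; the case numbers follow the glutAddMenuEntry calls above):

void mymenu(int n)
{
    switch (n) {
    case 1: zoom_in();  break;
    case 2: zoom_out(); break;
    case 3: black = !black; break; // toggle black & white; display() picks it up
    case 4: exit(0);
    }
    glutPostRedisplay();
}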