// protot/3rdparty/bgfx/examples/common/nanovg/nanovg_bgfx.cpp

/*
* Copyright 2011-2018 Branimir Karadzic. All rights reserved.
* License: https://github.com/bkaradzic/bgfx#license-bsd-2-clause
*/
//
// Copyright (c) 2009-2013 Mikko Mononen memon@inside.org
//
// This software is provided 'as-is', without any express or implied
// warranty. In no event will the authors be held liable for any damages
// arising from the use of this software.
// Permission is granted to anyone to use this software for any purpose,
// including commercial applications, and to alter it and redistribute it
// freely, subject to the following restrictions:
// 1. The origin of this software must not be misrepresented; you must not
// claim that you wrote the original software. If you use this software
// in a product, an acknowledgment in the product documentation would be
// appreciated but is not required.
// 2. Altered source versions must be plainly marked as such, and must not be
// misrepresented as being the original software.
// 3. This notice may not be removed or altered from any source distribution.
//
#define NVG_ANTIALIAS 1
#include <stdlib.h>
#include <math.h>
#include "nanovg.h"
#include <bgfx/bgfx.h>
#include <bgfx/embedded_shader.h>
#include <bx/bx.h>
#include <bx/allocator.h>
#include <bx/uint32_t.h>
BX_PRAGMA_DIAGNOSTIC_IGNORED_MSVC(4244); // warning C4244: '=' : conversion from '' to '', possible loss of data
#include "vs_nanovg_fill.bin.h"
#include "fs_nanovg_fill.bin.h"
static const bgfx::EmbeddedShader s_embeddedShaders[] =
{
BGFX_EMBEDDED_SHADER(vs_nanovg_fill),
BGFX_EMBEDDED_SHADER(fs_nanovg_fill),
BGFX_EMBEDDED_SHADER_END()
};
namespace
{
static bgfx::VertexDecl s_nvgDecl;
enum GLNVGshaderType
{
NSVG_SHADER_FILLGRAD,
NSVG_SHADER_FILLIMG,
NSVG_SHADER_SIMPLE,
NSVG_SHADER_IMG
};
// These are additional flags on top of NVGimageFlags.
enum NVGimageFlagsGL {
NVG_IMAGE_NODELETE = 1<<16, // Do not delete GL texture handle.
};
struct GLNVGtexture
{
bgfx::TextureHandle id;
int width, height;
int type;
int flags;
};
struct GLNVGblend
{
uint64_t srcRGB;
uint64_t dstRGB;
uint64_t srcAlpha;
uint64_t dstAlpha;
};
enum GLNVGcallType
{
GLNVG_FILL,
GLNVG_CONVEXFILL,
GLNVG_STROKE,
GLNVG_TRIANGLES,
};
struct GLNVGcall
{
int type;
int image;
int pathOffset;
int pathCount;
int vertexOffset;
int vertexCount;
int uniformOffset;
GLNVGblend blendFunc;
};
struct GLNVGpath
{
int fillOffset;
int fillCount;
int strokeOffset;
int strokeCount;
};
struct GLNVGfragUniforms
{
float scissorMat[12]; // matrices are actually 3 vec4s
float paintMat[12];
NVGcolor innerCol;
NVGcolor outerCol;
// u_scissorExtScale
float scissorExt[2];
float scissorScale[2];
// u_extentRadius
float extent[2];
float radius;
// u_params
float feather;
float strokeMult;
float texType;
float type;
};
struct GLNVGcontext
{
bx::AllocatorI* allocator;
bgfx::ProgramHandle prog;
bgfx::UniformHandle u_scissorMat;
bgfx::UniformHandle u_paintMat;
bgfx::UniformHandle u_innerCol;
bgfx::UniformHandle u_outerCol;
bgfx::UniformHandle u_viewSize;
bgfx::UniformHandle u_scissorExtScale;
bgfx::UniformHandle u_extentRadius;
bgfx::UniformHandle u_params;
bgfx::UniformHandle u_halfTexel;
bgfx::UniformHandle s_tex;
uint64_t state;
bgfx::TextureHandle th;
bgfx::TextureHandle texMissing;
bgfx::TransientVertexBuffer tvb;
bgfx::ViewId viewId;
struct GLNVGtexture* textures;
float view[2];
int ntextures;
int ctextures;
int textureId;
int vertBuf;
int fragSize;
int edgeAntiAlias;
// Per frame buffers
struct GLNVGcall* calls;
int ccalls;
int ncalls;
struct GLNVGpath* paths;
int cpaths;
int npaths;
struct NVGvertex* verts;
int cverts;
int nverts;
unsigned char* uniforms;
int cuniforms;
int nuniforms;
};
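// Returns a free texture slot: reuses the first entry whose handle has been
// invalidated, otherwise grows the array (doubling its capacity). The returned
// slot is always zeroed before use.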
static struct GLNVGtexture* glnvg__allocTexture(struct GLNVGcontext* gl)
{
struct GLNVGtexture* tex = NULL;
int i;
for (i = 0; i < gl->ntextures; i++)
{
if (gl->textures[i].id.idx == bgfx::kInvalidHandle)
{
tex = &gl->textures[i];
break;
}
}
if (tex == NULL)
{
if (gl->ntextures+1 > gl->ctextures)
{
int old = gl->ctextures;
gl->ctextures = (gl->ctextures == 0) ? 2 : gl->ctextures*2;
gl->textures = (struct GLNVGtexture*)BX_REALLOC(gl->allocator, gl->textures, sizeof(struct GLNVGtexture)*gl->ctextures);
bx::memSet(&gl->textures[old], 0xff, (gl->ctextures-old)*sizeof(struct GLNVGtexture) );
if (gl->textures == NULL)
{
return NULL;
}
}
tex = &gl->textures[gl->ntextures++];
}
bx::memSet(tex, 0, sizeof(*tex) );
return tex;
}
static struct GLNVGtexture* glnvg__findTexture(struct GLNVGcontext* gl, int id)
{
int i;
for (i = 0; i < gl->ntextures; i++)
{
if (gl->textures[i].id.idx == id)
{
return &gl->textures[i];
}
}
return NULL;
}
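// Destroys the bgfx texture whose handle index matches 'id' (unless it is flagged
// NVG_IMAGE_NODELETE) and marks the slot as free. Returns 1 if found, 0 otherwise.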
static int glnvg__deleteTexture(struct GLNVGcontext* gl, int id)
{
for (int ii = 0; ii < gl->ntextures; ii++)
{
if (gl->textures[ii].id.idx == id)
{
if (bgfx::isValid(gl->textures[ii].id)
&& (gl->textures[ii].flags & NVG_IMAGE_NODELETE) == 0)
{
bgfx::destroy(gl->textures[ii].id);
}
bx::memSet(&gl->textures[ii], 0, sizeof(gl->textures[ii]) );
gl->textures[ii].id.idx = bgfx::kInvalidHandle;
return 1;
}
}
return 0;
}
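// NanoVG render callback: creates the resources shared by every draw call -- the
// fill program built from the embedded shaders, a 4x4 placeholder texture used when
// an image is missing, the uniform handles, and the position/texcoord vertex layout.
// fragSize is the per-call uniform block stride, rounded up to 16-byte alignment.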
static int nvgRenderCreate(void* _userPtr)
{
struct GLNVGcontext* gl = (struct GLNVGcontext*)_userPtr;
bgfx::RendererType::Enum type = bgfx::getRendererType();
gl->prog = bgfx::createProgram(
bgfx::createEmbeddedShader(s_embeddedShaders, type, "vs_nanovg_fill")
, bgfx::createEmbeddedShader(s_embeddedShaders, type, "fs_nanovg_fill")
, true
);
const bgfx::Memory* mem = bgfx::alloc(4*4*4);
uint32_t* bgra8 = (uint32_t*)mem->data;
bx::memSet(bgra8, 0, 4*4*4);
gl->texMissing = bgfx::createTexture2D(4, 4, false, 1, bgfx::TextureFormat::BGRA8, 0, mem);
gl->u_scissorMat = bgfx::createUniform("u_scissorMat", bgfx::UniformType::Mat3);
gl->u_paintMat = bgfx::createUniform("u_paintMat", bgfx::UniformType::Mat3);
gl->u_innerCol = bgfx::createUniform("u_innerCol", bgfx::UniformType::Vec4);
gl->u_outerCol = bgfx::createUniform("u_outerCol", bgfx::UniformType::Vec4);
gl->u_viewSize = bgfx::createUniform("u_viewSize", bgfx::UniformType::Vec4);
gl->u_scissorExtScale = bgfx::createUniform("u_scissorExtScale", bgfx::UniformType::Vec4);
gl->u_extentRadius = bgfx::createUniform("u_extentRadius", bgfx::UniformType::Vec4);
gl->u_params = bgfx::createUniform("u_params", bgfx::UniformType::Vec4);
gl->s_tex = bgfx::createUniform("s_tex", bgfx::UniformType::Int1);
if (bgfx::getRendererType() == bgfx::RendererType::Direct3D9)
{
gl->u_halfTexel = bgfx::createUniform("u_halfTexel", bgfx::UniformType::Vec4);
}
else
{
gl->u_halfTexel.idx = bgfx::kInvalidHandle;
}
s_nvgDecl
.begin()
.add(bgfx::Attrib::Position, 2, bgfx::AttribType::Float)
.add(bgfx::Attrib::TexCoord0, 2, bgfx::AttribType::Float)
.end();
int align = 16;
gl->fragSize = sizeof(struct GLNVGfragUniforms) + align - sizeof(struct GLNVGfragUniforms) % align;
return 1;
}
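// NanoVG render callback: creates an RGBA8 or R8 texture and uploads the initial
// pixels when provided. The bgfx handle index doubles as the NanoVG image id.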
static int nvgRenderCreateTexture(
void* _userPtr
, int _type
, int _width
, int _height
, int _flags
, const unsigned char* _rgba
)
{
struct GLNVGcontext* gl = (struct GLNVGcontext*)_userPtr;
struct GLNVGtexture* tex = glnvg__allocTexture(gl);
if (tex == NULL)
{
return 0;
}
tex->width = _width;
tex->height = _height;
tex->type = _type;
tex->flags = _flags;
uint32_t bytesPerPixel = NVG_TEXTURE_RGBA == tex->type ? 4 : 1;
uint32_t pitch = tex->width * bytesPerPixel;
const bgfx::Memory* mem = NULL;
if (NULL != _rgba)
{
mem = bgfx::copy(_rgba, tex->height * pitch);
}
tex->id = bgfx::createTexture2D(
tex->width
, tex->height
, false
, 1
, NVG_TEXTURE_RGBA == _type ? bgfx::TextureFormat::RGBA8 : bgfx::TextureFormat::R8
, BGFX_TEXTURE_NONE
);
if (NULL != mem)
{
bgfx::updateTexture2D(
tex->id
, 0
, 0
, 0
, 0
, tex->width
, tex->height
, mem
);
}
return bgfx::isValid(tex->id) ? tex->id.idx : 0;
}
static int nvgRenderDeleteTexture(void* _userPtr, int image)
{
struct GLNVGcontext* gl = (struct GLNVGcontext*)_userPtr;
return glnvg__deleteTexture(gl, image);
}
static int nvgRenderUpdateTexture(void* _userPtr, int image, int x, int y, int w, int h, const unsigned char* data)
{
struct GLNVGcontext* gl = (struct GLNVGcontext*)_userPtr;
struct GLNVGtexture* tex = glnvg__findTexture(gl, image);
if (tex == NULL)
{
return 0;
}
uint32_t bytesPerPixel = NVG_TEXTURE_RGBA == tex->type ? 4 : 1;
uint32_t pitch = tex->width * bytesPerPixel;
const bgfx::Memory* mem = bgfx::alloc(w * h * bytesPerPixel);
bx::gather(mem->data, data + y * pitch + x * bytesPerPixel, w * bytesPerPixel, h, pitch);
bgfx::updateTexture2D(
tex->id
, 0
, 0
, x
, y
, w
, h
, mem
, UINT16_MAX
);
return 1;
}
static int nvgRenderGetTextureSize(void* _userPtr, int image, int* w, int* h)
{
struct GLNVGcontext* gl = (struct GLNVGcontext*)_userPtr;
struct GLNVGtexture* tex = glnvg__findTexture(gl, image);
if (NULL == tex
|| !bgfx::isValid(tex->id) )
{
return 0;
}
*w = tex->width;
*h = tex->height;
return 1;
}
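// Expands NanoVG's 2x3 affine transform into three vec4-padded columns (a 3x4
// float array); nvgRenderSetUniforms compacts it back to a 3x3 before upload.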
static void glnvg__xformToMat3x4(float* m3, float* t)
{
m3[ 0] = t[0];
m3[ 1] = t[1];
m3[ 2] = 0.0f;
m3[ 3] = 0.0f;
m3[ 4] = t[2];
m3[ 5] = t[3];
m3[ 6] = 0.0f;
m3[ 7] = 0.0f;
m3[ 8] = t[4];
m3[ 9] = t[5];
m3[10] = 1.0f;
m3[11] = 0.0f;
}
static NVGcolor glnvg__premulColor(NVGcolor c)
{
c.r *= c.a;
c.g *= c.a;
c.b *= c.a;
return c;
}
static int glnvg__convertPaint(
struct GLNVGcontext* gl
, struct GLNVGfragUniforms* frag
, struct NVGpaint* paint
, struct NVGscissor* scissor
, float width
, float fringe
)
{
struct GLNVGtexture* tex = NULL;
float invxform[6] = {};
bx::memSet(frag, 0, sizeof(*frag) );
frag->innerCol = glnvg__premulColor(paint->innerColor);
frag->outerCol = glnvg__premulColor(paint->outerColor);
if (scissor->extent[0] < -0.5f || scissor->extent[1] < -0.5f)
{
bx::memSet(frag->scissorMat, 0, sizeof(frag->scissorMat) );
frag->scissorExt[0] = 1.0f;
frag->scissorExt[1] = 1.0f;
frag->scissorScale[0] = 1.0f;
frag->scissorScale[1] = 1.0f;
}
else
{
nvgTransformInverse(invxform, scissor->xform);
glnvg__xformToMat3x4(frag->scissorMat, invxform);
frag->scissorExt[0] = scissor->extent[0];
frag->scissorExt[1] = scissor->extent[1];
frag->scissorScale[0] = sqrtf(scissor->xform[0]*scissor->xform[0] + scissor->xform[2]*scissor->xform[2]) / fringe;
frag->scissorScale[1] = sqrtf(scissor->xform[1]*scissor->xform[1] + scissor->xform[3]*scissor->xform[3]) / fringe;
}
bx::memCopy(frag->extent, paint->extent, sizeof(frag->extent) );
frag->strokeMult = (width*0.5f + fringe*0.5f) / fringe;
gl->th = gl->texMissing;
if (paint->image != 0)
{
tex = glnvg__findTexture(gl, paint->image);
if (tex == NULL)
{
return 0;
}
nvgTransformInverse(invxform, paint->xform);
frag->type = NSVG_SHADER_FILLIMG;
if (tex->type == NVG_TEXTURE_RGBA)
{
frag->texType = (tex->flags & NVG_IMAGE_PREMULTIPLIED) ? 0.0f : 1.0f;
}
else
{
frag->texType = 2.0f;
}
gl->th = tex->id;
}
else
{
frag->type = NSVG_SHADER_FILLGRAD;
frag->radius = paint->radius;
frag->feather = paint->feather;
nvgTransformInverse(invxform, paint->xform);
}
glnvg__xformToMat3x4(frag->paintMat, invxform);
return 1;
}
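// Extracts the 3x3 matrix back out of the vec4-padded 3x4 storage used above.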
static void glnvg__mat3(float* dst, float* src)
{
dst[0] = src[ 0];
dst[1] = src[ 1];
dst[2] = src[ 2];
dst[3] = src[ 4];
dst[4] = src[ 5];
dst[5] = src[ 6];
dst[6] = src[ 8];
dst[7] = src[ 9];
dst[8] = src[10];
}
static struct GLNVGfragUniforms* nvg__fragUniformPtr(struct GLNVGcontext* gl, int i)
{
return (struct GLNVGfragUniforms*)&gl->uniforms[i];
}
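// Uploads the per-call fragment uniforms and remembers which texture the following
// submit should bind, falling back to the placeholder texture. On Direct3D9 the
// u_halfTexel uniform is also set to compensate for the half-texel sampling offset.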
static void nvgRenderSetUniforms(struct GLNVGcontext* gl, int uniformOffset, int image)
{
struct GLNVGfragUniforms* frag = nvg__fragUniformPtr(gl, uniformOffset);
float tmp[9]; // Maybe there's a way to get rid of this...
glnvg__mat3(tmp, frag->scissorMat);
bgfx::setUniform(gl->u_scissorMat, tmp);
glnvg__mat3(tmp, frag->paintMat);
bgfx::setUniform(gl->u_paintMat, tmp);
bgfx::setUniform(gl->u_innerCol, frag->innerCol.rgba);
bgfx::setUniform(gl->u_outerCol, frag->outerCol.rgba);
bgfx::setUniform(gl->u_scissorExtScale, &frag->scissorExt[0]);
bgfx::setUniform(gl->u_extentRadius, &frag->extent[0]);
bgfx::setUniform(gl->u_params, &frag->feather);
bgfx::TextureHandle handle = gl->texMissing;
if (image != 0)
{
struct GLNVGtexture* tex = glnvg__findTexture(gl, image);
if (tex != NULL)
{
handle = tex->id;
if (bgfx::isValid(gl->u_halfTexel) )
{
float halfTexel[4] = { 0.5f / tex->width, 0.5f / tex->height };
bgfx::setUniform(gl->u_halfTexel, halfTexel);
}
}
}
gl->th = handle;
}
static void nvgRenderViewport(void* _userPtr, int width, int height, float devicePixelRatio)
{
struct GLNVGcontext* gl = (struct GLNVGcontext*)_userPtr;
gl->view[0] = (float)width;
gl->view[1] = (float)height;
bgfx::setViewRect(gl->viewId, 0, 0, width * devicePixelRatio, height * devicePixelRatio);
}
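// bgfx has no triangle-fan primitive, so emulate one with a transient index buffer
// that fans _count vertices starting at _start into (_count - 2) triangles.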
static void fan(uint32_t _start, uint32_t _count)
{
uint32_t numTris = _count-2;
bgfx::TransientIndexBuffer tib;
bgfx::allocTransientIndexBuffer(&tib, numTris*3);
uint16_t* data = (uint16_t*)tib.data;
for (uint32_t ii = 0; ii < numTris; ++ii)
{
data[ii*3+0] = _start;
data[ii*3+1] = _start + ii + 1;
data[ii*3+2] = _start + ii + 2;
}
bgfx::setIndexBuffer(&tib);
}
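// Concave fill using stencil-then-cover: the path triangles are first rendered with
// color writes disabled while incrementing/decrementing the stencil (two-sided),
// the anti-aliased fringes are then drawn where the stencil is still zero, and a
// final bounding quad covers every pixel with a non-zero stencil, resetting it.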
static void glnvg__fill(struct GLNVGcontext* gl, struct GLNVGcall* call)
{
struct GLNVGpath* paths = &gl->paths[call->pathOffset];
int i, npaths = call->pathCount;
// set bindpoint for solid loc
nvgRenderSetUniforms(gl, call->uniformOffset, 0);
for (i = 0; i < npaths; i++)
{
if (2 < paths[i].fillCount)
{
bgfx::setState(0);
bgfx::setStencil(0
| BGFX_STENCIL_TEST_ALWAYS
| BGFX_STENCIL_FUNC_RMASK(0xff)
| BGFX_STENCIL_OP_FAIL_S_KEEP
| BGFX_STENCIL_OP_FAIL_Z_KEEP
| BGFX_STENCIL_OP_PASS_Z_INCR
, 0
| BGFX_STENCIL_TEST_ALWAYS
| BGFX_STENCIL_FUNC_RMASK(0xff)
| BGFX_STENCIL_OP_FAIL_S_KEEP
| BGFX_STENCIL_OP_FAIL_Z_KEEP
| BGFX_STENCIL_OP_PASS_Z_DECR
);
bgfx::setVertexBuffer(0, &gl->tvb);
bgfx::setTexture(0, gl->s_tex, gl->th);
fan(paths[i].fillOffset, paths[i].fillCount);
bgfx::submit(gl->viewId, gl->prog);
}
}
// Draw anti-aliased pixels
nvgRenderSetUniforms(gl, call->uniformOffset + gl->fragSize, call->image);
if (gl->edgeAntiAlias)
{
// Draw fringes
for (i = 0; i < npaths; i++)
{
bgfx::setState(gl->state
| BGFX_STATE_PT_TRISTRIP
);
bgfx::setStencil(0
| BGFX_STENCIL_TEST_EQUAL
| BGFX_STENCIL_FUNC_RMASK(0xff)
| BGFX_STENCIL_OP_FAIL_S_KEEP
| BGFX_STENCIL_OP_FAIL_Z_KEEP
| BGFX_STENCIL_OP_PASS_Z_KEEP
);
bgfx::setVertexBuffer(0, &gl->tvb, paths[i].strokeOffset, paths[i].strokeCount);
bgfx::setTexture(0, gl->s_tex, gl->th);
bgfx::submit(gl->viewId, gl->prog);
}
}
// Draw fill
bgfx::setState(gl->state);
bgfx::setVertexBuffer(0, &gl->tvb, call->vertexOffset, call->vertexCount);
bgfx::setTexture(0, gl->s_tex, gl->th);
bgfx::setStencil(0
| BGFX_STENCIL_TEST_NOTEQUAL
| BGFX_STENCIL_FUNC_RMASK(0xff)
| BGFX_STENCIL_OP_FAIL_S_ZERO
| BGFX_STENCIL_OP_FAIL_Z_ZERO
| BGFX_STENCIL_OP_PASS_Z_ZERO
);
bgfx::submit(gl->viewId, gl->prog);
}
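// Convex fill: no stencil pass is required, so each path is drawn directly as a
// triangle fan, followed by its anti-aliased fringe strip when edge AA is enabled.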
static void glnvg__convexFill(struct GLNVGcontext* gl, struct GLNVGcall* call)
{
struct GLNVGpath* paths = &gl->paths[call->pathOffset];
int i, npaths = call->pathCount;
nvgRenderSetUniforms(gl, call->uniformOffset, call->image);
for (i = 0; i < npaths; i++)
{
if (paths[i].fillCount == 0) continue;
bgfx::setState(gl->state);
bgfx::setVertexBuffer(0, &gl->tvb);
bgfx::setTexture(0, gl->s_tex, gl->th);
fan(paths[i].fillOffset, paths[i].fillCount);
bgfx::submit(gl->viewId, gl->prog);
}
if (gl->edgeAntiAlias)
{
// Draw fringes
for (i = 0; i < npaths; i++)
{
bgfx::setState(gl->state
| BGFX_STATE_PT_TRISTRIP
);
bgfx::setVertexBuffer(0, &gl->tvb, paths[i].strokeOffset, paths[i].strokeCount);
bgfx::setTexture(0, gl->s_tex, gl->th);
bgfx::submit(gl->viewId, gl->prog);
}
}
}
static void glnvg__stroke(struct GLNVGcontext* gl, struct GLNVGcall* call)
{
struct GLNVGpath* paths = &gl->paths[call->pathOffset];
int npaths = call->pathCount, i;
nvgRenderSetUniforms(gl, call->uniformOffset, call->image);
// Draw Strokes
for (i = 0; i < npaths; i++)
{
bgfx::setState(gl->state
| BGFX_STATE_PT_TRISTRIP
);
bgfx::setVertexBuffer(0, &gl->tvb, paths[i].strokeOffset, paths[i].strokeCount);
bgfx::setTexture(0, gl->s_tex, gl->th);
bgfx::submit(gl->viewId, gl->prog);
}
}
static void glnvg__triangles(struct GLNVGcontext* gl, struct GLNVGcall* call)
{
if (3 <= call->vertexCount)
{
nvgRenderSetUniforms(gl, call->uniformOffset, call->image);
bgfx::setState(gl->state);
bgfx::setVertexBuffer(0, &gl->tvb, call->vertexOffset, call->vertexCount);
bgfx::setTexture(0, gl->s_tex, gl->th);
bgfx::submit(gl->viewId, gl->prog);
}
}
static const uint64_t s_blend[] =
{
BGFX_STATE_BLEND_ZERO,
BGFX_STATE_BLEND_ONE,
BGFX_STATE_BLEND_SRC_COLOR,
BGFX_STATE_BLEND_INV_SRC_COLOR,
BGFX_STATE_BLEND_DST_COLOR,
BGFX_STATE_BLEND_INV_DST_COLOR,
BGFX_STATE_BLEND_SRC_ALPHA,
BGFX_STATE_BLEND_INV_SRC_ALPHA,
BGFX_STATE_BLEND_DST_ALPHA,
BGFX_STATE_BLEND_INV_DST_ALPHA,
BGFX_STATE_BLEND_SRC_ALPHA_SAT,
};
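// NVGblendFactor values are single-bit flags (1 << n), so counting trailing zeros
// yields the index of the matching bgfx blend state in s_blend.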
static uint64_t glnvg_convertBlendFuncFactor(int factor)
{
const uint32_t numtz = bx::uint32_cnttz(factor);
const uint32_t idx = bx::uint32_min(numtz, BX_COUNTOF(s_blend)-1);
return s_blend[idx];
}
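// Translates a NanoVG composite-operation state into bgfx blend factors, falling
// back to premultiplied-alpha "over" blending when any factor cannot be mapped.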
static GLNVGblend glnvg__blendCompositeOperation(NVGcompositeOperationState op)
{
GLNVGblend blend;
blend.srcRGB = glnvg_convertBlendFuncFactor(op.srcRGB);
blend.dstRGB = glnvg_convertBlendFuncFactor(op.dstRGB);
blend.srcAlpha = glnvg_convertBlendFuncFactor(op.srcAlpha);
blend.dstAlpha = glnvg_convertBlendFuncFactor(op.dstAlpha);
if (blend.srcRGB == BGFX_STATE_NONE || blend.dstRGB == BGFX_STATE_NONE || blend.srcAlpha == BGFX_STATE_NONE || blend.dstAlpha == BGFX_STATE_NONE)
{
blend.srcRGB = BGFX_STATE_BLEND_ONE;
blend.dstRGB = BGFX_STATE_BLEND_INV_SRC_ALPHA;
blend.srcAlpha = BGFX_STATE_BLEND_ONE;
blend.dstAlpha = BGFX_STATE_BLEND_INV_SRC_ALPHA;
}
return blend;
}
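// NanoVG render callback: copies this frame's vertices into a transient vertex
// buffer (truncating if it cannot hold them all), then replays the recorded
// fill/convex-fill/stroke/triangle calls with their per-call blend state.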
static void nvgRenderFlush(void* _userPtr)
{
struct GLNVGcontext* gl = (struct GLNVGcontext*)_userPtr;
if (gl->ncalls > 0)
{
bgfx::allocTransientVertexBuffer(&gl->tvb, gl->nverts, s_nvgDecl);
int allocated = gl->tvb.size/gl->tvb.stride;
if (allocated < gl->nverts)
{
gl->nverts = allocated;
BX_WARN(true, "Vertex number truncated due to transient vertex buffer overflow");
}
bx::memCopy(gl->tvb.data, gl->verts, gl->nverts * sizeof(struct NVGvertex) );
bgfx::setUniform(gl->u_viewSize, gl->view);
for (uint32_t ii = 0, num = gl->ncalls; ii < num; ++ii)
{
struct GLNVGcall* call = &gl->calls[ii];
const GLNVGblend* blend = &call->blendFunc;
gl->state = BGFX_STATE_BLEND_FUNC_SEPARATE(blend->srcRGB, blend->dstRGB, blend->srcAlpha, blend->dstAlpha)
| BGFX_STATE_RGB_WRITE
| BGFX_STATE_ALPHA_WRITE
;
switch (call->type)
{
case GLNVG_FILL:
glnvg__fill(gl, call);
break;
case GLNVG_CONVEXFILL:
glnvg__convexFill(gl, call);
break;
case GLNVG_STROKE:
glnvg__stroke(gl, call);
break;
case GLNVG_TRIANGLES:
glnvg__triangles(gl, call);
break;
}
}
}
// Reset calls
gl->nverts = 0;
gl->npaths = 0;
gl->ncalls = 0;
gl->nuniforms = 0;
}
static int glnvg__maxVertCount(const struct NVGpath* paths, int npaths)
{
int i, count = 0;
for (i = 0; i < npaths; i++)
{
count += paths[i].nfill;
count += paths[i].nstroke;
}
return count;
}
static int glnvg__maxi(int a, int b) { return a > b ? a : b; }
static struct GLNVGcall* glnvg__allocCall(struct GLNVGcontext* gl)
{
struct GLNVGcall* ret = NULL;
if (gl->ncalls+1 > gl->ccalls)
{
gl->ccalls = gl->ccalls == 0 ? 32 : gl->ccalls * 2;
gl->calls = (struct GLNVGcall*)BX_REALLOC(gl->allocator, gl->calls, sizeof(struct GLNVGcall) * gl->ccalls);
}
ret = &gl->calls[gl->ncalls++];
bx::memSet(ret, 0, sizeof(struct GLNVGcall) );
return ret;
}
static int glnvg__allocPaths(struct GLNVGcontext* gl, int n)
{
int ret = 0;
if (gl->npaths + n > gl->cpaths) {
GLNVGpath* paths;
int cpaths = glnvg__maxi(gl->npaths + n, 128) + gl->cpaths / 2; // 1.5x Overallocate
paths = (GLNVGpath*)BX_REALLOC(gl->allocator, gl->paths, sizeof(GLNVGpath) * cpaths);
if (paths == NULL) return -1;
gl->paths = paths;
gl->cpaths = cpaths;
}
ret = gl->npaths;
gl->npaths += n;
return ret;
}
static int glnvg__allocVerts(GLNVGcontext* gl, int n)
{
int ret = 0;
if (gl->nverts+n > gl->cverts)
{
NVGvertex* verts;
int cverts = glnvg__maxi(gl->nverts + n, 4096) + gl->cverts/2; // 1.5x Overallocate
verts = (NVGvertex*)BX_REALLOC(gl->allocator, gl->verts, sizeof(NVGvertex) * cverts);
if (verts == NULL) return -1;
gl->verts = verts;
gl->cverts = cverts;
}
ret = gl->nverts;
gl->nverts += n;
return ret;
}
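// Reserves n fragment-uniform blocks and returns the byte offset of the first one;
// blocks are spaced fragSize bytes apart so every block stays 16-byte aligned.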
static int glnvg__allocFragUniforms(struct GLNVGcontext* gl, int n)
{
int ret = 0, structSize = gl->fragSize;
if (gl->nuniforms+n > gl->cuniforms)
{
gl->cuniforms = gl->cuniforms == 0 ? glnvg__maxi(n, 32) : gl->cuniforms * 2;
gl->uniforms = (unsigned char*)BX_REALLOC(gl->allocator, gl->uniforms, gl->cuniforms * structSize);
}
ret = gl->nuniforms * structSize;
gl->nuniforms += n;
return ret;
}
static void glnvg__vset(struct NVGvertex* vtx, float x, float y, float u, float v)
{
vtx->x = x;
vtx->y = y;
vtx->u = u;
vtx->v = v;
}
static void nvgRenderFill(
void* _userPtr
, NVGpaint* paint
, NVGcompositeOperationState compositeOperation
, NVGscissor* scissor
, float fringe
, const float* bounds
, const NVGpath* paths
, int npaths
)
{
struct GLNVGcontext* gl = (struct GLNVGcontext*)_userPtr;
struct GLNVGcall* call = glnvg__allocCall(gl);
struct NVGvertex* quad;
struct GLNVGfragUniforms* frag;
int i, maxverts, offset;
call->type = GLNVG_FILL;
call->pathOffset = glnvg__allocPaths(gl, npaths);
call->pathCount = npaths;
call->image = paint->image;
call->blendFunc = glnvg__blendCompositeOperation(compositeOperation);
if (npaths == 1 && paths[0].convex)
{
call->type = GLNVG_CONVEXFILL;
}
// Allocate vertices for all the paths.
maxverts = glnvg__maxVertCount(paths, npaths) + 6;
offset = glnvg__allocVerts(gl, maxverts);
for (i = 0; i < npaths; i++)
{
struct GLNVGpath* copy = &gl->paths[call->pathOffset + i];
const struct NVGpath* path = &paths[i];
bx::memSet(copy, 0, sizeof(struct GLNVGpath) );
if (path->nfill > 0)
{
copy->fillOffset = offset;
copy->fillCount = path->nfill;
bx::memCopy(&gl->verts[offset], path->fill, sizeof(struct NVGvertex) * path->nfill);
offset += path->nfill;
}
if (path->nstroke > 0)
{
copy->strokeOffset = offset;
copy->strokeCount = path->nstroke;
bx::memCopy(&gl->verts[offset], path->stroke, sizeof(struct NVGvertex) * path->nstroke);
offset += path->nstroke;
}
}
// Quad
call->vertexOffset = offset;
call->vertexCount = 6;
quad = &gl->verts[call->vertexOffset];
glnvg__vset(&quad[0], bounds[0], bounds[3], 0.5f, 1.0f);
glnvg__vset(&quad[1], bounds[2], bounds[3], 0.5f, 1.0f);
glnvg__vset(&quad[2], bounds[2], bounds[1], 0.5f, 1.0f);
glnvg__vset(&quad[3], bounds[0], bounds[3], 0.5f, 1.0f);
glnvg__vset(&quad[4], bounds[2], bounds[1], 0.5f, 1.0f);
glnvg__vset(&quad[5], bounds[0], bounds[1], 0.5f, 1.0f);
// Setup uniforms for draw calls
if (call->type == GLNVG_FILL)
{
call->uniformOffset = glnvg__allocFragUniforms(gl, 2);
// Simple shader for stencil
frag = nvg__fragUniformPtr(gl, call->uniformOffset);
bx::memSet(frag, 0, sizeof(*frag) );
frag->type = NSVG_SHADER_SIMPLE;
// Fill shader
glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset + gl->fragSize), paint, scissor, fringe, fringe);
}
else
{
call->uniformOffset = glnvg__allocFragUniforms(gl, 1);
// Fill shader
glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset), paint, scissor, fringe, fringe);
}
}
static void nvgRenderStroke(
void* _userPtr
, struct NVGpaint* paint
, NVGcompositeOperationState compositeOperation
, struct NVGscissor* scissor
, float fringe
, float strokeWidth
, const struct NVGpath* paths
, int npaths
)
{
struct GLNVGcontext* gl = (struct GLNVGcontext*)_userPtr;
struct GLNVGcall* call = glnvg__allocCall(gl);
int i, maxverts, offset;
call->type = GLNVG_STROKE;
call->pathOffset = glnvg__allocPaths(gl, npaths);
call->pathCount = npaths;
call->image = paint->image;
call->blendFunc = glnvg__blendCompositeOperation(compositeOperation);
// Allocate vertices for all the paths.
maxverts = glnvg__maxVertCount(paths, npaths);
offset = glnvg__allocVerts(gl, maxverts);
for (i = 0; i < npaths; i++)
{
struct GLNVGpath* copy = &gl->paths[call->pathOffset + i];
const struct NVGpath* path = &paths[i];
bx::memSet(copy, 0, sizeof(struct GLNVGpath) );
if (path->nstroke)
{
copy->strokeOffset = offset;
copy->strokeCount = path->nstroke;
bx::memCopy(&gl->verts[offset], path->stroke, sizeof(struct NVGvertex) * path->nstroke);
offset += path->nstroke;
}
}
// Fill shader
call->uniformOffset = glnvg__allocFragUniforms(gl, 1);
glnvg__convertPaint(gl, nvg__fragUniformPtr(gl, call->uniformOffset), paint, scissor, strokeWidth, fringe);
}
static void nvgRenderTriangles(void* _userPtr, struct NVGpaint* paint, NVGcompositeOperationState compositeOperation, struct NVGscissor* scissor,
const struct NVGvertex* verts, int nverts)
{
struct GLNVGcontext* gl = (struct GLNVGcontext*)_userPtr;
struct GLNVGcall* call = glnvg__allocCall(gl);
struct GLNVGfragUniforms* frag;
call->type = GLNVG_TRIANGLES;
call->image = paint->image;
call->blendFunc = glnvg__blendCompositeOperation(compositeOperation);
// Allocate vertices for all the paths.
call->vertexOffset = glnvg__allocVerts(gl, nverts);
call->vertexCount = nverts;
bx::memCopy(&gl->verts[call->vertexOffset], verts, sizeof(struct NVGvertex) * nverts);
// Fill shader
call->uniformOffset = glnvg__allocFragUniforms(gl, 1);
frag = nvg__fragUniformPtr(gl, call->uniformOffset);
glnvg__convertPaint(gl, frag, paint, scissor, 1.0f, 1.0f);
frag->type = NSVG_SHADER_IMG;
}
static void nvgRenderDelete(void* _userPtr)
{
struct GLNVGcontext* gl = (struct GLNVGcontext*)_userPtr;
if (gl == NULL)
{
return;
}
bgfx::destroy(gl->prog);
bgfx::destroy(gl->texMissing);
bgfx::destroy(gl->u_scissorMat);
bgfx::destroy(gl->u_paintMat);
bgfx::destroy(gl->u_innerCol);
bgfx::destroy(gl->u_outerCol);
bgfx::destroy(gl->u_viewSize);
bgfx::destroy(gl->u_scissorExtScale);
bgfx::destroy(gl->u_extentRadius);
bgfx::destroy(gl->u_params);
bgfx::destroy(gl->s_tex);
if (bgfx::isValid(gl->u_halfTexel) )
{
bgfx::destroy(gl->u_halfTexel);
}
for (uint32_t ii = 0, num = gl->ntextures; ii < num; ++ii)
{
if (bgfx::isValid(gl->textures[ii].id)
&& (gl->textures[ii].flags & NVG_IMAGE_NODELETE) == 0)
{
bgfx::destroy(gl->textures[ii].id);
}
}
BX_FREE(gl->allocator, gl->uniforms);
BX_FREE(gl->allocator, gl->verts);
BX_FREE(gl->allocator, gl->paths);
BX_FREE(gl->allocator, gl->calls);
BX_FREE(gl->allocator, gl->textures);
BX_FREE(gl->allocator, gl);
}
} // namespace
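// A minimal usage sketch (assuming bgfx is already initialized and a frame loop is
// running elsewhere; view id 0 and the drawing calls below are purely illustrative):
//
//   NVGcontext* nvg = nvgCreate(1, 0);        // edge AA on, render into view 0
//   ...
//   nvgBeginFrame(nvg, width, height, 1.0f);  // once per frame
//   nvgBeginPath(nvg);
//   nvgRect(nvg, 10.0f, 10.0f, 100.0f, 50.0f);
//   nvgFillColor(nvg, nvgRGBA(255, 192, 0, 255) );
//   nvgFill(nvg);
//   nvgEndFrame(nvg);                         // submits the recorded calls to bgfx
//   ...
//   nvgDelete(nvg);                           // on shutdown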
NVGcontext* nvgCreate(int32_t _edgeaa, bgfx::ViewId _viewId, bx::AllocatorI* _allocator)
{
if (NULL == _allocator)
{
static bx::DefaultAllocator allocator;
_allocator = &allocator;
}
struct NVGparams params;
struct NVGcontext* ctx = NULL;
struct GLNVGcontext* gl = (struct GLNVGcontext*)BX_ALLOC(_allocator, sizeof(struct GLNVGcontext) );
if (gl == NULL)
{
goto error;
}
bx::memSet(gl, 0, sizeof(struct GLNVGcontext) );
bx::memSet(&params, 0, sizeof(params) );
params.renderCreate = nvgRenderCreate;
params.renderCreateTexture = nvgRenderCreateTexture;
params.renderDeleteTexture = nvgRenderDeleteTexture;
params.renderUpdateTexture = nvgRenderUpdateTexture;
params.renderGetTextureSize = nvgRenderGetTextureSize;
params.renderViewport = nvgRenderViewport;
params.renderFlush = nvgRenderFlush;
params.renderFill = nvgRenderFill;
params.renderStroke = nvgRenderStroke;
params.renderTriangles = nvgRenderTriangles;
params.renderDelete = nvgRenderDelete;
params.userPtr = gl;
params.edgeAntiAlias = _edgeaa;
gl->allocator = _allocator;
gl->edgeAntiAlias = _edgeaa;
gl->viewId = _viewId;
ctx = nvgCreateInternal(&params);
if (ctx == NULL) goto error;
return ctx;
error:
// 'gl' is freed by nvgDeleteInternal.
if (ctx != NULL)
{
nvgDeleteInternal(ctx);
}
return NULL;
}
NVGcontext* nvgCreate(int32_t _edgeaa, bgfx::ViewId _viewId) {
return nvgCreate(_edgeaa, _viewId, NULL);
}
void nvgDelete(struct NVGcontext* _ctx)
{
nvgDeleteInternal(_ctx);
}
void nvgSetViewId(struct NVGcontext* _ctx, bgfx::ViewId _viewId)
{
struct NVGparams* params = nvgInternalParams(_ctx);
struct GLNVGcontext* gl = (struct GLNVGcontext*)params->userPtr;
gl->viewId = _viewId;
}
uint16_t nvgGetViewId(struct NVGcontext* _ctx)
{
struct NVGparams* params = nvgInternalParams(_ctx);
struct GLNVGcontext* gl = (struct GLNVGcontext*)params->userPtr;
return gl->viewId;
}
bgfx::TextureHandle nvglImageHandle(NVGcontext* _ctx, int32_t _image)
{
GLNVGcontext* gl = (GLNVGcontext*)nvgInternalParams(_ctx)->userPtr;
GLNVGtexture* tex = glnvg__findTexture(gl, _image);
return tex->id;
}
NVGLUframebuffer* nvgluCreateFramebuffer(NVGcontext* ctx, int32_t width, int32_t height, int32_t imageFlags, bgfx::ViewId viewId)
{
NVGLUframebuffer* framebuffer = nvgluCreateFramebuffer(ctx, width, height, imageFlags);
if (framebuffer != NULL)
{
nvgluSetViewFramebuffer(viewId, framebuffer);
}
return framebuffer;
}
NVGLUframebuffer* nvgluCreateFramebuffer(NVGcontext* _ctx, int32_t _width, int32_t _height, int32_t _imageFlags)
{
BX_UNUSED(_imageFlags);
bgfx::TextureHandle textures[] =
{
bgfx::createTexture2D(_width, _height, false, 1, bgfx::TextureFormat::RGBA8, BGFX_TEXTURE_RT),
bgfx::createTexture2D(_width, _height, false, 1, bgfx::TextureFormat::D24S8, BGFX_TEXTURE_RT | BGFX_TEXTURE_RT_WRITE_ONLY)
};
bgfx::FrameBufferHandle fbh = bgfx::createFrameBuffer(
BX_COUNTOF(textures)
, textures
, true
);
if (!bgfx::isValid(fbh) )
{
return NULL;
}
struct NVGparams* params = nvgInternalParams(_ctx);
struct GLNVGcontext* gl = (struct GLNVGcontext*)params->userPtr;
struct GLNVGtexture* tex = glnvg__allocTexture(gl);
if (NULL == tex)
{
bgfx::destroy(fbh);
return NULL;
}
tex->width = _width;
tex->height = _height;
tex->type = NVG_TEXTURE_RGBA;
tex->flags = _imageFlags | NVG_IMAGE_PREMULTIPLIED;
tex->id = bgfx::getTexture(fbh);
NVGLUframebuffer* framebuffer = BX_NEW(gl->allocator, NVGLUframebuffer);
framebuffer->ctx = _ctx;
framebuffer->image = tex->id.idx;
framebuffer->handle = fbh;
return framebuffer;
}
void nvgluBindFramebuffer(NVGLUframebuffer* _framebuffer)
{
static NVGcontext* s_prevCtx = NULL;
static bgfx::ViewId s_prevViewId;
if (_framebuffer != NULL)
{
s_prevCtx = _framebuffer->ctx;
s_prevViewId = nvgGetViewId(_framebuffer->ctx);
nvgSetViewId(_framebuffer->ctx, _framebuffer->viewId);
}
else if (s_prevCtx != NULL)
{
nvgSetViewId(s_prevCtx, s_prevViewId);
}
}
void nvgluDeleteFramebuffer(NVGLUframebuffer* _framebuffer)
{
if (_framebuffer == NULL)
{
return;
}
if (bgfx::isValid(_framebuffer->handle))
{
bgfx::destroy(_framebuffer->handle);
}
struct NVGparams* params = nvgInternalParams(_framebuffer->ctx);
struct GLNVGcontext* gl = (struct GLNVGcontext*)params->userPtr;
glnvg__deleteTexture(gl, _framebuffer->image);
BX_DELETE(gl->allocator, _framebuffer);
}
void nvgluSetViewFramebuffer(bgfx::ViewId _viewId, NVGLUframebuffer* _framebuffer)
{
_framebuffer->viewId = _viewId;
bgfx::setViewFrameBuffer(_viewId, _framebuffer->handle);
bgfx::setViewMode(_viewId, bgfx::ViewMode::Sequential);
}