source/blender/gpu/intern/gpu_index_buffer.cc
 */

/** \file
 * \ingroup gpu
 *
 * GPU element list (AKA index buffer)
 */

#include "atomic_ops.h"

#include "MEM_guardedalloc.h"

#include "BLI_listbase.h"
#include "BLI_threads.h"
#include "BLI_utildefines.h"

#include "gpu_backend.hh"

#include "gpu_index_buffer_private.hh"
/* …370 unchanged lines omitted… */
}

void GPU_indexbuf_bind_as_ssbo(GPUIndexBuf *elem, int binding)
{
  unwrap(elem)->bind_as_ssbo(binding);
}

/** \} */

/* -------------------------------------------------------------------- */
/** \name GPUIndexBufBuilderAsync
 * \{ */
/* Maximum number of #GPUIndexBufBuilderAsync that may be initialized but not yet finished at the
 * same time. */
#define MAX_BUILDER_ASYNC 8

typedef struct GPUIndexBufBuilderLink {
  struct GPUIndexBufBuilderLink *next, *prev;
  GPUIndexBufBuilder builder;
} GPUIndexBufBuilderLink;

/* Per-thread cache: the builder copy this thread uses for each active slot. */
thread_local struct {
  uint32_t thread_id;
  GPUIndexBufBuilderLink *builder_cpy;
} g_local_data[MAX_BUILDER_ASYNC] = {{0}};

/* Slot usage flags shared between threads. A slot is claimed with an atomic CAS. */
static uint32_t g_current_thread_id_used[MAX_BUILDER_ASYNC] = {0};
static uint32_t g_last_thread_id = 1;
static uint gpu_indexbuf_async_find_unused_slot(void)
{
  for (uint slot = 0; slot < ARRAY_SIZE(g_current_thread_id_used); slot++) {
    if (atomic_cas_uint32(&g_current_thread_id_used[slot], 0, 1) == 0) {
      return slot;
    }
  }
  BLI_assert_msg(0, "Are more slots really needed?!");
  return (uint)-1;
}
/* Allocate a copy of the builder for the calling thread and register it for the final merge. */
static GPUIndexBufBuilderLink *gpu_indexbuf_async_builder_alloc(
    GPUIndexBufBuilderAsync *builder_async)
{
  GPUIndexBufBuilderLink *builder_cpy;
  builder_cpy = (GPUIndexBufBuilderLink *)MEM_mallocN(sizeof(*builder_cpy), __func__);

  g_local_data[builder_async->slot].thread_id = builder_async->thread_local_id;
  g_local_data[builder_async->slot].builder_cpy = builder_cpy;

  /* #builder is only read here and is not written after init, so no mutex is needed for it. */
  GPUIndexBufBuilder *builder = &builder_async->builder;
  memcpy(&builder_cpy->builder, builder, sizeof(*builder));

  BLI_mutex_lock(&builder_async->mutex);
  BLI_addtail(&builder_async->builder_cpy_list, builder_cpy);
  BLI_mutex_unlock(&builder_async->mutex);
  return builder_cpy;
}
/* Return the calling thread's builder copy, allocating one on first use from this thread. */
BLI_INLINE GPUIndexBufBuilderLink *gpu_indexbuf_async_builder_cpy_get(
    GPUIndexBufBuilderAsync *builder_async)
{
  GPUIndexBufBuilderLink *builder_cpy = g_local_data[builder_async->slot].builder_cpy;
  if (g_local_data[builder_async->slot].thread_id != builder_async->thread_local_id) {
    builder_cpy = gpu_indexbuf_async_builder_alloc(builder_async);
  }
  return builder_cpy;
}
/* Init in a single thread. Fill from multiple threads. */
void GPU_indexbuf_async_init_ex(GPUIndexBufBuilderAsync *builder_async,
                                GPUPrimType prim_type,
                                uint index_len,
                                uint vertex_len)
{
  GPUIndexBufBuilder *builder = &builder_async->builder;
  GPU_indexbuf_init_ex(builder, prim_type, index_len, vertex_len);

  builder_async->slot = gpu_indexbuf_async_find_unused_slot();
  builder_async->thread_local_id = atomic_fetch_and_add_uint32(&g_last_thread_id, 1);
  if (builder_async->thread_local_id == 0) {
    /* Forbidden number: #thread_local_id can't be zero,
     * because #g_local_data is initialized with #thread_id 0. */
    builder_async->thread_local_id = atomic_fetch_and_add_uint32(&g_last_thread_id, 1);
  }

  BLI_mutex_init(&builder_async->mutex);
  BLI_listbase_clear(&builder_async->builder_cpy_list);
  gpu_indexbuf_async_builder_alloc(builder_async);
}
void GPU_indexbuf_async_init(GPUIndexBufBuilderAsync *builder_async,
                             GPUPrimType prim_type,
                             uint prim_len,
                             uint vertex_len)
{
  int verts_per_prim = GPU_indexbuf_primitive_len(prim_type);
#if TRUST_NO_ONE
  assert(verts_per_prim != -1);
#endif
  GPU_indexbuf_async_init_ex(
      builder_async, prim_type, prim_len * (uint)verts_per_prim, vertex_len);
}
void GPU_indexbuf_async_set_point_vert(GPUIndexBufBuilderAsync *builder_async, uint elem, uint v1)
{
  GPUIndexBufBuilderLink *builder_cpy = gpu_indexbuf_async_builder_cpy_get(builder_async);
  GPU_indexbuf_set_point_vert(&builder_cpy->builder, elem, v1);
}

void GPU_indexbuf_async_set_line_verts(GPUIndexBufBuilderAsync *builder_async,
                                       uint elem,
                                       uint v1,
                                       uint v2)
{
  GPUIndexBufBuilderLink *builder_cpy = gpu_indexbuf_async_builder_cpy_get(builder_async);
  GPU_indexbuf_set_line_verts(&builder_cpy->builder, elem, v1, v2);
}

void GPU_indexbuf_async_set_tri_verts(
    GPUIndexBufBuilderAsync *builder_async, uint elem, uint v1, uint v2, uint v3)
{
  GPUIndexBufBuilderLink *builder_cpy = gpu_indexbuf_async_builder_cpy_get(builder_async);
  GPU_indexbuf_set_tri_verts(&builder_cpy->builder, elem, v1, v2, v3);
}

void GPU_indexbuf_async_set_point_restart(GPUIndexBufBuilderAsync *builder_async, uint elem)
{
  GPUIndexBufBuilderLink *builder_cpy = gpu_indexbuf_async_builder_cpy_get(builder_async);
  GPU_indexbuf_set_point_restart(&builder_cpy->builder, elem);
}

void GPU_indexbuf_async_set_line_restart(GPUIndexBufBuilderAsync *builder_async, uint elem)
{
  GPUIndexBufBuilderLink *builder_cpy = gpu_indexbuf_async_builder_cpy_get(builder_async);
  GPU_indexbuf_set_line_restart(&builder_cpy->builder, elem);
}

void GPU_indexbuf_async_set_tri_restart(GPUIndexBufBuilderAsync *builder_async, uint elem)
{
  GPUIndexBufBuilderLink *builder_cpy = gpu_indexbuf_async_builder_cpy_get(builder_async);
  GPU_indexbuf_set_tri_restart(&builder_cpy->builder, elem);
}
void GPU_indexbuf_async_build_in_place(GPUIndexBufBuilderAsync *builder_async, GPUIndexBuf *elem)
{
  /* Release the slot. */
  g_current_thread_id_used[builder_async->slot] = 0;

  GPUIndexBufBuilder *builder = &builder_async->builder;
  GPUIndexBufBuilderLink *builder_cpy;
  /* Merge the per-thread copies. They all share the same index data;
   * only the written length differs. */
  while ((builder_cpy = (GPUIndexBufBuilderLink *)BLI_pophead(&builder_async->builder_cpy_list))) {
    builder->index_len = MAX2(builder->index_len, builder_cpy->builder.index_len);
    MEM_freeN(builder_cpy);
  }
  BLI_mutex_end(&builder_async->mutex);

  GPU_indexbuf_build_in_place(builder, elem);
}
/** \} */
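
Review note: below is a minimal usage sketch of the intended lifecycle ("Init in a single thread. Fill from multiple threads."), added only to make the threading contract explicit; it is not part of the patch. `FillData`, `fill_range_fn` and `build_tris_ibo_example` are hypothetical names, and `BLI_task_parallel_range` is just one possible driver for the fill step. Initialization and the final build stay on one thread, while each parallel iteration writes a distinct `elem`, so the `GPU_indexbuf_async_set_*` calls need no extra locking.

/* Illustrative sketch only. Assumes "BLI_task.h", "BLI_utildefines.h" and
 * "GPU_index_buffer.h" are included. */

typedef struct FillData {
  GPUIndexBufBuilderAsync *builder_async;
  const uint (*tris)[3]; /* Source triangle vertex indices. */
} FillData;

static void fill_range_fn(void *__restrict userdata,
                          const int t,
                          const TaskParallelTLS *__restrict UNUSED(tls))
{
  FillData *data = (FillData *)userdata;
  const uint *tri = data->tris[t];
  /* Distinct `elem` per iteration: threads write disjoint ranges of the shared index data. */
  GPU_indexbuf_async_set_tri_verts(data->builder_async, (uint)t, tri[0], tri[1], tri[2]);
}

static void build_tris_ibo_example(GPUIndexBuf *ibo,
                                   const uint (*tris)[3],
                                   uint tris_len,
                                   uint verts_len)
{
  /* Single-threaded init: claims a slot and allocates the shared index data. */
  GPUIndexBufBuilderAsync builder_async;
  GPU_indexbuf_async_init(&builder_async, GPU_PRIM_TRIS, tris_len, verts_len);

  /* Multi-threaded fill: each worker lazily gets its own builder copy. */
  FillData data = {&builder_async, tris};
  TaskParallelSettings settings;
  BLI_task_parallel_range_settings_defaults(&settings);
  BLI_task_parallel_range(0, (int)tris_len, &data, fill_range_fn, &settings);

  /* Single-threaded finish: merges the per-thread copies and uploads into `ibo`. */
  GPU_indexbuf_async_build_in_place(&builder_async, ibo);
}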