source/blender/draw/intern/draw_manager_data.cc
- This file was moved from source/blender/draw/intern/draw_manager_data.c.
 /* SPDX-License-Identifier: GPL-2.0-or-later
  * Copyright 2016 Blender Foundation. */

 /** \file
  * \ingroup draw
  */

+#include "DRW_pbvh.h"
+#include "draw_attributes.h"
 #include "draw_manager.h"
+#include "draw_pbvh.h"

 #include "BKE_curve.h"
 #include "BKE_duplilist.h"
 #include "BKE_global.h"
 #include "BKE_image.h"
 #include "BKE_mesh.h"
 #include "BKE_object.h"
 #include "BKE_paint.h"
 #include "BKE_pbvh.h"
 #include "BKE_volume.h"

 /* For debug cursor position. */
 #include "WM_api.h"
 #include "wm_window.h"

 #include "DNA_curve_types.h"
 #include "DNA_mesh_types.h"
 #include "DNA_meta_types.h"
 #include "DNA_screen_types.h"

-#include "BLI_alloca.h"
+#include "BLI_array.hh"
 #include "BLI_hash.h"
 #include "BLI_link_utils.h"
 #include "BLI_listbase.h"
-#include "BLI_math_bits.h"
 #include "BLI_memblock.h"
 #include "BLI_mempool.h"

 #ifdef DRW_DEBUG_CULLING
 #  include "BLI_math_bits.h"
 #endif

-#include "GPU_buffers.h"
 #include "GPU_capabilities.h"
 #include "GPU_material.h"
 #include "GPU_uniform_buffer.h"

 #include "intern/gpu_codegen.h"

 /**
  * IMPORTANT:
  * In order to be able to write to the same print buffer sequentially, we add a barrier to allow
  * multiple shader calls writing to the same buffer.
  * However, this adds explicit synchronization events which might change the rest of the
  * application behavior and hide some bugs. If you know you are using shader debug print in only
  * one shader pass, you can comment this out to remove the aforementioned barrier.
  */
 #define DISABLE_DEBUG_SHADER_PRINT_BARRIER
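[Note (illustration, not part of this diff): the define above is meant to skip the synchronization that lets several dispatches append to one debug-print buffer. A minimal sketch of how such a guard is typically consumed; `GPU_memory_barrier()` and `GPU_BARRIER_SHADER_STORAGE` are the existing GPU module API, while the placement inside the print-flush path is an assumption for illustration:

    #ifndef DISABLE_DEBUG_SHADER_PRINT_BARRIER
      /* Make prior shader writes to the print SSBO visible to later passes. */
      GPU_memory_barrier(GPU_BARRIER_SHADER_STORAGE);
    #endif
]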
 /* -------------------------------------------------------------------- */
 /** \name Uniform Buffer Object (DRW_uniformbuffer)
  * \{ */

 static void draw_call_sort(DRWCommand *array, DRWCommand *array_tmp, int array_len)
 {
-  /* Count unique batches. Tt's not really important if
+  /* Count unique batches. It's not really important if
    * there is collisions. If there is a lot of different batches,
    * the sorting benefit will be negligible.
    * So at least sort fast! */
   uchar idx[128] = {0};
   /* Shift by 6 positions knowing each GPUBatch is > 64 bytes */
-#define KEY(a) ((((size_t)((a).draw.batch)) >> 6) % ARRAY_SIZE(idx))
+#define KEY(a) ((size_t((a).draw.batch) >> 6) % ARRAY_SIZE(idx))
   BLI_assert(array_len <= ARRAY_SIZE(idx));
   for (int i = 0; i < array_len; i++) {
     /* Early out if nothing to sort. */
     if (++idx[KEY(array[i])] == array_len) {
       return;
     }
   }
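[Note (worked example, not part of this diff): the bucket key drops the low 6 bits of the batch pointer because two live GPUBatch allocations are more than 64 bytes apart. With assumed pointer values:

    ((size_t)0x1040 >> 6) % 128  /* == 65 */
    ((size_t)0x1080 >> 6) % 128  /* == 66, a batch 64 bytes away lands in the next bucket */

So distinct batches rarely collide within the 128 counting buckets, which is all this fast pre-sort needs.]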
@@ ... @@ void drw_resource_buffer_finish(DRWData *vmempool)
   int chunk_id = DRW_handle_chunk_get(&DST.resource_handle);
   int elem_id = DRW_handle_id_get(&DST.resource_handle);
   int ubo_len = 1 + chunk_id - ((elem_id == 0) ? 1 : 0);
   size_t list_size = sizeof(GPUUniformBuf *) * ubo_len;
   /* TODO: find a better system. currently a lot of obinfos UBO are going to be unused
    * if not rendering with Eevee. */
-  if (vmempool->matrices_ubo == NULL) {
-    vmempool->matrices_ubo = MEM_callocN(list_size, __func__);
-    vmempool->obinfos_ubo = MEM_callocN(list_size, __func__);
+  if (vmempool->matrices_ubo == nullptr) {
+    vmempool->matrices_ubo = static_cast<GPUUniformBuf **>(MEM_callocN(list_size, __func__));
+    vmempool->obinfos_ubo = static_cast<GPUUniformBuf **>(MEM_callocN(list_size, __func__));
     vmempool->ubo_len = ubo_len;
   }
   /* Remove unnecessary buffers */
   for (int i = ubo_len; i < vmempool->ubo_len; i++) {
     GPU_uniformbuf_free(vmempool->matrices_ubo[i]);
     GPU_uniformbuf_free(vmempool->obinfos_ubo[i]);
   }
   if (ubo_len != vmempool->ubo_len) {
-    vmempool->matrices_ubo = MEM_recallocN(vmempool->matrices_ubo, list_size);
-    vmempool->obinfos_ubo = MEM_recallocN(vmempool->obinfos_ubo, list_size);
+    vmempool->matrices_ubo = static_cast<GPUUniformBuf **>(
+        MEM_recallocN(vmempool->matrices_ubo, list_size));
+    vmempool->obinfos_ubo = static_cast<GPUUniformBuf **>(
+        MEM_recallocN(vmempool->obinfos_ubo, list_size));
     vmempool->ubo_len = ubo_len;
   }
   /* Create/Update buffers. */
   for (int i = 0; i < ubo_len; i++) {
     void *data_obmat = BLI_memblock_elem_get(vmempool->obmats, i, 0);
     void *data_infos = BLI_memblock_elem_get(vmempool->obinfos, i, 0);
-    if (vmempool->matrices_ubo[i] == NULL) {
+    if (vmempool->matrices_ubo[i] == nullptr) {
       vmempool->matrices_ubo[i] = GPU_uniformbuf_create(sizeof(DRWObjectMatrix) *
                                                         DRW_RESOURCE_CHUNK_LEN);
       vmempool->obinfos_ubo[i] = GPU_uniformbuf_create(sizeof(DRWObjectInfos) *
                                                        DRW_RESOURCE_CHUNK_LEN);
     }
     GPU_uniformbuf_update(vmempool->matrices_ubo[i], data_obmat);
     GPU_uniformbuf_update(vmempool->obinfos_ubo[i], data_infos);
   }
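[Note (worked example, not part of this diff): `ubo_len` above allocates one UBO per started resource chunk. With assumed handle state of `chunk_id == 2` and `elem_id == 5` (two full chunks plus five elements in the third), `ubo_len == 1 + 2 - 0 == 3`; if the handle sits exactly at a chunk boundary (`elem_id == 0`), the last chunk holds nothing yet and `ubo_len == 1 + 2 - 1 == 2`.]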
   DRW_uniform_attrs_pool_flush_all(vmempool->obattrs_ubo_pool);

   /* Aligned alloc to avoid unaligned memcpy. */
-  DRWCommandChunk *chunk_tmp = MEM_mallocN_aligned(sizeof(DRWCommandChunk), 16, "tmp call chunk");
+  DRWCommandChunk *chunk_tmp = static_cast<DRWCommandChunk *>(
+      MEM_mallocN_aligned(sizeof(DRWCommandChunk), 16, __func__));
   DRWCommandChunk *chunk;
   BLI_memblock_iter iter;
   BLI_memblock_iternew(vmempool->commands, &iter);
-  while ((chunk = BLI_memblock_iterstep(&iter))) {
+  while ((chunk = static_cast<DRWCommandChunk *>(BLI_memblock_iterstep(&iter)))) {
     bool sortable = true;
     /* We can only sort chunks that contain #DRWCommandDraw only. */
     for (int i = 0; i < ARRAY_SIZE(chunk->command_type) && sortable; i++) {
       if (chunk->command_type[i] != 0) {
         sortable = false;
       }
     }
     if (sortable) {
@@ ... @@ if (loc == -1) {
     /* Nice to enable eventually, for now EEVEE uses uniforms that might not exist. */
     // BLI_assert(0);
     return;
   }

   DRWUniformChunk *unichunk = shgroup->uniforms;
   /* Happens on first uniform or if chunk is full. */
   if (!unichunk || unichunk->uniform_used == unichunk->uniform_len) {
-    unichunk = BLI_memblock_alloc(DST.vmempool->uniforms);
+    unichunk = static_cast<DRWUniformChunk *>(BLI_memblock_alloc(DST.vmempool->uniforms));
     unichunk->uniform_len = ARRAY_SIZE(shgroup->uniforms->uniforms);
     unichunk->uniform_used = 0;
     BLI_LINKS_PREPEND(shgroup->uniforms, unichunk);
   }
   DRWUniform *uni = unichunk->uniforms + unichunk->uniform_used++;
   uni->location = loc;
@@ ... @@ static void drw_shgroup_uniform(DRWShadingGroup *shgroup,
   BLI_assert(!ELEM(type,
                    DRW_UNIFORM_STORAGE_BLOCK,
                    DRW_UNIFORM_STORAGE_BLOCK_REF,
                    DRW_UNIFORM_BLOCK,
                    DRW_UNIFORM_BLOCK_REF,
                    DRW_UNIFORM_TEXTURE,
                    DRW_UNIFORM_TEXTURE_REF));
   int location = GPU_shader_get_uniform(shgroup->shader, name);
-  drw_shgroup_uniform_create_ex(shgroup, location, type, value, 0, length, arraysize);
+  drw_shgroup_uniform_create_ex(
+      shgroup, location, type, value, GPU_SAMPLER_DEFAULT, length, arraysize);
 }
 void DRW_shgroup_uniform_texture_ex(DRWShadingGroup *shgroup,
                                     const char *name,
                                     const GPUTexture *tex,
                                     eGPUSamplerState sampler_state)
 {
-  BLI_assert(tex != NULL);
+  BLI_assert(tex != nullptr);
   int loc = GPU_shader_get_texture_binding(shgroup->shader, name);
   drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_TEXTURE, tex, sampler_state, 0, 1);
 }
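[Note (hypothetical usage, not part of this diff): binding a texture with an explicit sampler state from an engine's cache-populate step; `grp` and `tex` are assumed to exist, `GPU_SAMPLER_FILTER` is an existing `eGPUSamplerState` flag:

    DRW_shgroup_uniform_texture_ex(grp, "sourceBuffer", tex, GPU_SAMPLER_FILTER);

The plain `DRW_shgroup_uniform_texture()` wrapper below passes `GPU_SAMPLER_MAX`, which leaves the sampler state to the texture's own defaults.]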
 void DRW_shgroup_uniform_texture(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex)
 {
   DRW_shgroup_uniform_texture_ex(shgroup, name, tex, GPU_SAMPLER_MAX);
 }

 void DRW_shgroup_uniform_texture_ref_ex(DRWShadingGroup *shgroup,
                                         const char *name,
                                         GPUTexture **tex,
                                         eGPUSamplerState sampler_state)
 {
-  BLI_assert(tex != NULL);
+  BLI_assert(tex != nullptr);
   int loc = GPU_shader_get_texture_binding(shgroup->shader, name);
   drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_TEXTURE_REF, tex, sampler_state, 0, 1);
 }

 void DRW_shgroup_uniform_texture_ref(DRWShadingGroup *shgroup, const char *name, GPUTexture **tex)
 {
   DRW_shgroup_uniform_texture_ref_ex(shgroup, name, tex, GPU_SAMPLER_MAX);
 }

 void DRW_shgroup_uniform_image(DRWShadingGroup *shgroup, const char *name, const GPUTexture *tex)
 {
-  BLI_assert(tex != NULL);
+  BLI_assert(tex != nullptr);
   int loc = GPU_shader_get_texture_binding(shgroup->shader, name);
-  drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_IMAGE, tex, 0, 0, 1);
+  drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_IMAGE, tex, GPU_SAMPLER_DEFAULT, 0, 1);
 }

 void DRW_shgroup_uniform_image_ref(DRWShadingGroup *shgroup, const char *name, GPUTexture **tex)
 {
-  BLI_assert(tex != NULL);
+  BLI_assert(tex != nullptr);
   int loc = GPU_shader_get_texture_binding(shgroup->shader, name);
-  drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_IMAGE_REF, tex, 0, 0, 1);
+  drw_shgroup_uniform_create_ex(
+      shgroup, loc, DRW_UNIFORM_IMAGE_REF, tex, GPU_SAMPLER_DEFAULT, 0, 1);
 }

 void DRW_shgroup_uniform_block_ex(DRWShadingGroup *shgroup,
                                   const char *name,
                                   const GPUUniformBuf *ubo DRW_DEBUG_FILE_LINE_ARGS)
 {
-  BLI_assert(ubo != NULL);
+  BLI_assert(ubo != nullptr);
   int loc = GPU_shader_get_uniform_block_binding(shgroup->shader, name);
   if (loc == -1) {
 #ifdef DRW_UNUSED_RESOURCE_TRACKING
     printf("%s:%d: Unable to locate binding of shader uniform buffer object: %s.\n",
            file,
            line,
            name);
 #else
     /* TODO(@fclem): Would be good to have, but eevee has too much of this for the moment. */
     // BLI_assert_msg(0, "Unable to locate binding of shader uniform buffer objects.");
 #endif
     return;
   }
-  drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_BLOCK, ubo, 0, 0, 1);
+  drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_BLOCK, ubo, GPU_SAMPLER_DEFAULT, 0, 1);
 }

 void DRW_shgroup_uniform_block_ref_ex(DRWShadingGroup *shgroup,
                                       const char *name,
                                       GPUUniformBuf **ubo DRW_DEBUG_FILE_LINE_ARGS)
 {
-  BLI_assert(ubo != NULL);
+  BLI_assert(ubo != nullptr);
   int loc = GPU_shader_get_uniform_block_binding(shgroup->shader, name);
   if (loc == -1) {
 #ifdef DRW_UNUSED_RESOURCE_TRACKING
     printf("%s:%d: Unable to locate binding of shader uniform buffer object: %s.\n",
            file,
            line,
            name);
 #else
     /* TODO(@fclem): Would be good to have, but eevee has too much of this for the moment. */
     // BLI_assert_msg(0, "Unable to locate binding of shader uniform buffer objects.");
 #endif
     return;
   }
-  drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_BLOCK_REF, ubo, 0, 0, 1);
+  drw_shgroup_uniform_create_ex(
+      shgroup, loc, DRW_UNIFORM_BLOCK_REF, ubo, GPU_SAMPLER_DEFAULT, 0, 1);
 }

 void DRW_shgroup_storage_block_ex(DRWShadingGroup *shgroup,
                                   const char *name,
                                   const GPUStorageBuf *ssbo DRW_DEBUG_FILE_LINE_ARGS)
 {
-  BLI_assert(ssbo != NULL);
+  BLI_assert(ssbo != nullptr);
   /* TODO(@fclem): Fix naming inconsistency. */
   int loc = GPU_shader_get_ssbo(shgroup->shader, name);
   if (loc == -1) {
 #ifdef DRW_UNUSED_RESOURCE_TRACKING
     printf("%s:%d: Unable to locate binding of shader storage buffer object: %s.\n",
            file,
            line,
            name);
 #else
     /* TODO(@fclem): Would be good to have, but eevee has too much of this for the moment. */
     // BLI_assert_msg(0, "Unable to locate binding of shader storage buffer objects.");
 #endif
     return;
   }
-  drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_STORAGE_BLOCK, ssbo, 0, 0, 1);
+  drw_shgroup_uniform_create_ex(
+      shgroup, loc, DRW_UNIFORM_STORAGE_BLOCK, ssbo, GPU_SAMPLER_DEFAULT, 0, 1);
 }

 void DRW_shgroup_storage_block_ref_ex(DRWShadingGroup *shgroup,
                                       const char *name,
                                       GPUStorageBuf **ssbo DRW_DEBUG_FILE_LINE_ARGS)
 {
-  BLI_assert(ssbo != NULL);
+  BLI_assert(ssbo != nullptr);
   /* TODO(@fclem): Fix naming inconsistency. */
   int loc = GPU_shader_get_ssbo(shgroup->shader, name);
   if (loc == -1) {
 #ifdef DRW_UNUSED_RESOURCE_TRACKING
     printf("%s:%d: Unable to locate binding of shader storage buffer object: %s.\n",
            file,
            line,
            name);
 #else
     /* TODO(@fclem): Would be good to have, but eevee has too much of this for the moment. */
     // BLI_assert_msg(0, "Unable to locate binding of shader storage buffer objects.");
 #endif
     return;
   }
-  drw_shgroup_uniform_create_ex(shgroup, loc, DRW_UNIFORM_STORAGE_BLOCK_REF, ssbo, 0, 0, 1);
+  drw_shgroup_uniform_create_ex(
+      shgroup, loc, DRW_UNIFORM_STORAGE_BLOCK_REF, ssbo, GPU_SAMPLER_DEFAULT, 0, 1);
 }

 void DRW_shgroup_uniform_bool(DRWShadingGroup *shgroup,
                               const char *name,
                               const int *value,
                               int arraysize)
 {
   /* Boolean are expected to be 4bytes longs for OpenGL! */
@@ ... @@ if (location == -1) {
     return;
   }
   /* Each array element stored as an individual entry in the uniform list.
    * All entries from the same array share the same base location,
    * and array-size used to determine the number of elements
    * copied in draw_update_uniforms. */
   for (int i = 0; i < 4; i++) {
-    drw_shgroup_uniform_create_ex(shgroup, location, DRW_UNIFORM_FLOAT_COPY, &value[i], 0, 4, 4);
+    drw_shgroup_uniform_create_ex(
+        shgroup, location, DRW_UNIFORM_FLOAT_COPY, &value[i], GPU_SAMPLER_DEFAULT, 4, 4);
   }
 }
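[Note (illustration, not part of this diff): the loop above registers four DRWUniform entries that all share one base location. With an assumed `location == 12`, each entry carries `location = 12` and copies one vec4 (`length = 4`), and per the in-code comment the shared location plus the array-size (4) is what lets draw_update_uniforms upload them as a single four-element vec4 array rather than four separate uniforms.]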
 void DRW_shgroup_vertex_buffer_ex(DRWShadingGroup *shgroup,
                                   const char *name,
                                   GPUVertBuf *vertex_buffer DRW_DEBUG_FILE_LINE_ARGS)
 {
   int location = GPU_shader_get_ssbo(shgroup->shader, name);
   if (location == -1) {
 #ifdef DRW_UNUSED_RESOURCE_TRACKING
     printf("%s:%d: Unable to locate binding of shader storage buffer object: %s.\n",
            file,
            line,
            name);
 #else
     BLI_assert_msg(0, "Unable to locate binding of shader storage buffer objects.");
 #endif
     return;
   }
-  drw_shgroup_uniform_create_ex(
-      shgroup, location, DRW_UNIFORM_VERTEX_BUFFER_AS_STORAGE, vertex_buffer, 0, 0, 1);
+  drw_shgroup_uniform_create_ex(shgroup,
+                                location,
+                                DRW_UNIFORM_VERTEX_BUFFER_AS_STORAGE,
+                                vertex_buffer,
+                                GPU_SAMPLER_DEFAULT,
+                                0,
+                                1);
 }

 void DRW_shgroup_vertex_buffer_ref_ex(DRWShadingGroup *shgroup,
                                       const char *name,
                                       GPUVertBuf **vertex_buffer DRW_DEBUG_FILE_LINE_ARGS)
 {
   int location = GPU_shader_get_ssbo(shgroup->shader, name);
   if (location == -1) {
 #ifdef DRW_UNUSED_RESOURCE_TRACKING
     printf("%s:%d: Unable to locate binding of shader storage buffer object: %s.\n",
            file,
            line,
            name);
 #else
     BLI_assert_msg(0, "Unable to locate binding of shader storage buffer objects.");
 #endif
     return;
   }
-  drw_shgroup_uniform_create_ex(
-      shgroup, location, DRW_UNIFORM_VERTEX_BUFFER_AS_STORAGE_REF, vertex_buffer, 0, 0, 1);
+  drw_shgroup_uniform_create_ex(shgroup,
+                                location,
+                                DRW_UNIFORM_VERTEX_BUFFER_AS_STORAGE_REF,
+                                vertex_buffer,
+                                GPU_SAMPLER_DEFAULT,
+                                0,
+                                1);
 }

 void DRW_shgroup_buffer_texture(DRWShadingGroup *shgroup,
                                 const char *name,
                                 GPUVertBuf *vertex_buffer)
 {
   int location = GPU_shader_get_ssbo(shgroup->shader, name);
   if (location == -1) {
     return;
   }
-  drw_shgroup_uniform_create_ex(
-      shgroup, location, DRW_UNIFORM_VERTEX_BUFFER_AS_TEXTURE, vertex_buffer, 0, 0, 1);
+  drw_shgroup_uniform_create_ex(shgroup,
+                                location,
+                                DRW_UNIFORM_VERTEX_BUFFER_AS_TEXTURE,
+                                vertex_buffer,
+                                GPU_SAMPLER_DEFAULT,
+                                0,
+                                1);
 }

 void DRW_shgroup_buffer_texture_ref(DRWShadingGroup *shgroup,
                                     const char *name,
                                     GPUVertBuf **vertex_buffer)
 {
   int location = GPU_shader_get_ssbo(shgroup->shader, name);
   if (location == -1) {
     return;
   }
-  drw_shgroup_uniform_create_ex(
-      shgroup, location, DRW_UNIFORM_VERTEX_BUFFER_AS_TEXTURE_REF, vertex_buffer, 0, 0, 1);
+  drw_shgroup_uniform_create_ex(shgroup,
+                                location,
+                                DRW_UNIFORM_VERTEX_BUFFER_AS_TEXTURE_REF,
+                                vertex_buffer,
+                                GPU_SAMPLER_DEFAULT,
+                                0,
+                                1);
 }

 /** \} */
 /* -------------------------------------------------------------------- */
 /** \name Draw Call (DRW_calls)
  * \{ */

 static void drw_call_calc_orco(Object *ob, float (*r_orcofacs)[4])
 {
-  ID *ob_data = (ob) ? ob->data : NULL;
+  ID *ob_data = (ob) ? static_cast<ID *>(ob->data) : nullptr;
   float loc[3], size[3];
-  float *texcoloc = NULL;
-  float *texcosize = NULL;
-  if (ob_data != NULL) {
+  float *texcoloc = nullptr;
+  float *texcosize = nullptr;
+  if (ob_data != nullptr) {
     switch (GS(ob_data->name)) {
       case ID_VO: {
         BoundBox *bbox = BKE_volume_boundbox_get(ob);
         mid_v3_v3v3(loc, bbox->vec[0], bbox->vec[6]);
         sub_v3_v3v3(size, bbox->vec[0], bbox->vec[6]);
         texcoloc = loc;
         texcosize = size;
         break;
       }
       case ID_ME:
-        BKE_mesh_texspace_get_reference((Mesh *)ob_data, NULL, &texcoloc, &texcosize);
+        BKE_mesh_texspace_get_reference((Mesh *)ob_data, nullptr, &texcoloc, &texcosize);
         break;
       case ID_CU_LEGACY: {
         Curve *cu = (Curve *)ob_data;
         BKE_curve_texspace_ensure(cu);
         texcoloc = cu->loc;
         texcosize = cu->size;
         break;
       }
       case ID_MB: {
         MetaBall *mb = (MetaBall *)ob_data;
         texcoloc = mb->loc;
         texcosize = mb->size;
         break;
       }
       default:
         break;
     }
   }
-  if ((texcoloc != NULL) && (texcosize != NULL)) {
+  if ((texcoloc != nullptr) && (texcosize != nullptr)) {
     mul_v3_v3fl(r_orcofacs[1], texcosize, 2.0f);
     invert_v3(r_orcofacs[1]);
     sub_v3_v3v3(r_orcofacs[0], texcoloc, texcosize);
     negate_v3(r_orcofacs[0]);
     mul_v3_v3(r_orcofacs[0], r_orcofacs[1]); /* result in a nice MADD in the shader */
   }
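[Note (derivation, not part of this diff): the block above computes `fac = 1 / (2 * size)` and `ofs = -(loc - size) * fac`, so the shader evaluates `orco = co * fac + ofs = (co - (loc - size)) / (2 * size)`. In other words, positions inside the texture-space box `[loc - size, loc + size]` are remapped to `[0, 1]` with a single multiply-add, the "nice MADD" the comment refers to.]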
   else {
     copy_v3_fl(r_orcofacs[0], 0.0f);
@@ ... @@ static void drw_call_obinfos_init(DRWObjectInfos *ob_infos, Object *ob)
   /* Orco factors. */
   drw_call_calc_orco(ob, ob_infos->orcotexfac);
   /* Random float value. */
   uint random = (DST.dupli_source) ?
                     DST.dupli_source->random_id :
                     /* TODO(fclem): this is rather costly to do at runtime. Maybe we can
                      * put it in ob->runtime and make depsgraph ensure it is up to date. */
                     BLI_hash_int_2d(BLI_hash_string(ob->id.name + 2), 0);
-  ob_infos->ob_random = random * (1.0f / (float)0xFFFFFFFF);
+  ob_infos->ob_random = random * (1.0f / float(0xFFFFFFFF));
   /* Object State. */
   ob_infos->ob_flag = 1.0f; /* Required to have a correct sign */
   ob_infos->ob_flag += (ob->base_flag & BASE_SELECTED) ? (1 << 1) : 0;
   ob_infos->ob_flag += (ob->base_flag & BASE_FROM_DUPLI) ? (1 << 2) : 0;
   ob_infos->ob_flag += (ob->base_flag & BASE_FROM_SET) ? (1 << 3) : 0;
   if (ob->base_flag & BASE_FROM_DUPLI) {
     ob_infos->ob_flag += (DRW_object_get_dupli_parent(ob) == DST.draw_ctx.obact) ? (1 << 4) : 0;
   }
   else {
     ob_infos->ob_flag += (ob == DST.draw_ctx.obact) ? (1 << 4) : 0;
   }
   /* Negative scaling. */
   ob_infos->ob_flag *= (ob->transflag & OB_NEG_SCALE) ? -1.0f : 1.0f;
   /* Object Color. */
   copy_v4_v4(ob_infos->ob_color, ob->color);
 }
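[Note (worked example, not part of this diff): `ob_flag` packs booleans into the integer part of a float plus its sign. For an assumed selected, active, non-dupli object: 1.0 (base, so the sign is well defined) + 2 (selected) + 16 (active) = 19.0; with negative object scaling the final multiply flips it to -19.0, so the sign transports `OB_NEG_SCALE` while the magnitude still carries the flag bits.]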
 static void drw_call_culling_init(DRWCullingState *cull, Object *ob)
 {
   const BoundBox *bbox;
-  if (ob != NULL && (bbox = BKE_object_boundbox_get(ob))) {
+  if (ob != nullptr && (bbox = BKE_object_boundbox_get(ob))) {
     float corner[3];
     /* Get BoundSphere center and radius from the BoundBox. */
     mid_v3_v3v3(cull->bsphere.center, bbox->vec[0], bbox->vec[6]);
     mul_v3_m4v3(corner, ob->obmat, bbox->vec[0]);
     mul_m4_v3(ob->obmat, cull->bsphere.center);
     cull->bsphere.radius = len_v3v3(cull->bsphere.center, corner);

     /* Bypass test for very large objects (see T67319). */
     if (UNLIKELY(cull->bsphere.radius > 1e12)) {
       cull->bsphere.radius = -1.0f;
     }
   }
   else {
     /* Bypass test. */
     cull->bsphere.radius = -1.0f;
   }
   /* Reset user data */
-  cull->user_data = NULL;
+  cull->user_data = nullptr;
 }
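[Note (worked example, not part of this diff): the bound sphere is derived from two opposite BoundBox corners (`vec[0]` and `vec[6]`). For an assumed unit cube centered at the origin under an identity `obmat`, the center is (0, 0, 0), the transformed corner is (-0.5, -0.5, -0.5), and `radius = sqrt(0.75) ≈ 0.866`, the sphere enclosing the whole box. Both center and corner are transformed by `obmat` before measuring, so object scale is reflected in the radius; a negative radius is the sentinel that disables the culling test entirely.]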
 static DRWResourceHandle drw_resource_handle_new(float (*obmat)[4], Object *ob)
 {
-  DRWCullingState *culling = BLI_memblock_alloc(DST.vmempool->cullstates);
-  DRWObjectMatrix *ob_mats = BLI_memblock_alloc(DST.vmempool->obmats);
+  DRWCullingState *culling = static_cast<DRWCullingState *>(
+      BLI_memblock_alloc(DST.vmempool->cullstates));
+  DRWObjectMatrix *ob_mats = static_cast<DRWObjectMatrix *>(
+      BLI_memblock_alloc(DST.vmempool->obmats));
   /* FIXME Meh, not always needed but can be accessed after creation.
    * Also it needs to have the same resource handle. */
-  DRWObjectInfos *ob_infos = BLI_memblock_alloc(DST.vmempool->obinfos);
+  DRWObjectInfos *ob_infos = static_cast<DRWObjectInfos *>(
+      BLI_memblock_alloc(DST.vmempool->obinfos));
   UNUSED_VARS(ob_infos);

   DRWResourceHandle handle = DST.resource_handle;
   DRW_handle_increment(&DST.resource_handle);

   if (ob && (ob->transflag & OB_NEG_SCALE)) {
     DRW_handle_negative_scale_enable(&handle);
   }
@@ ... @@ uint32_t DRW_object_resource_id_get(Object *UNUSED(ob))
   }
   return handle & ~(1u << 31);
 }

 static DRWResourceHandle drw_resource_handle(DRWShadingGroup *shgroup,
                                              float (*obmat)[4],
                                              Object *ob)
 {
-  if (ob == NULL) {
-    if (obmat == NULL) {
+  if (ob == nullptr) {
+    if (obmat == nullptr) {
       DRWResourceHandle handle = 0;
       return handle;
     }
-    return drw_resource_handle_new(obmat, NULL);
+    return drw_resource_handle_new(obmat, nullptr);
   }

   if (DST.ob_handle == 0) {
     DST.ob_handle = drw_resource_handle_new(obmat, ob);
     DST.ob_state_obinfo_init = false;
   }

   if (shgroup->objectinfo) {
     if (!DST.ob_state_obinfo_init) {
       DST.ob_state_obinfo_init = true;
-      DRWObjectInfos *ob_infos = DRW_memblock_elem_from_handle(DST.vmempool->obinfos,
-                                                               &DST.ob_handle);
+      DRWObjectInfos *ob_infos = static_cast<DRWObjectInfos *>(
+          DRW_memblock_elem_from_handle(DST.vmempool->obinfos, &DST.ob_handle));
       drw_call_obinfos_init(ob_infos, ob);
     }
   }

   if (shgroup->uniform_attrs) {
     drw_uniform_attrs_pool_update(DST.vmempool->obattrs_ubo_pool,
                                   shgroup->uniform_attrs,
                                   &DST.ob_handle,
                                   ob,
                                   DST.dupli_parent,
                                   DST.dupli_source);
   }

   return DST.ob_handle;
 }

 static void command_type_set(uint64_t *command_type_bits, int index, eDRWCommandType type)
 {
-  command_type_bits[index / 16] |= ((uint64_t)type) << ((index % 16) * 4);
+  command_type_bits[index / 16] |= uint64_t(type) << ((index % 16) * 4);
 }

 eDRWCommandType command_type_get(const uint64_t *command_type_bits, int index)
 {
-  return ((command_type_bits[index / 16] >> ((index % 16) * 4)) & 0xF);
+  return eDRWCommandType((command_type_bits[index / 16] >> ((index % 16) * 4)) & 0xF);
 }
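[Note (worked example, not part of this diff): each command type occupies 4 bits, so 16 types fit per `uint64_t`. With assumed `index = 5` and `type = 3`:

    command_type_bits[0] |= uint64_t(3) << 20;  /* 5 / 16 == 0, (5 % 16) * 4 == 20 */
    (command_type_bits[0] >> 20) & 0xF;         /* reads back 3 */

This is also why the sortability check in drw_resource_buffer_finish() can test whole `uint64_t` words against zero: a word of all zeros means 16 consecutive commands of type 0 (#DRWCommandDraw).]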
 static void *drw_command_create(DRWShadingGroup *shgroup, eDRWCommandType type)
 {
   DRWCommandChunk *chunk = shgroup->cmd.last;
-  if (chunk == NULL) {
-    DRWCommandSmallChunk *smallchunk = BLI_memblock_alloc(DST.vmempool->commands_small);
+  if (chunk == nullptr) {
+    DRWCommandSmallChunk *smallchunk = static_cast<DRWCommandSmallChunk *>(
+        BLI_memblock_alloc(DST.vmempool->commands_small));
     smallchunk->command_len = ARRAY_SIZE(smallchunk->commands);
     smallchunk->command_used = 0;
     smallchunk->command_type[0] = 0x0lu;
     chunk = (DRWCommandChunk *)smallchunk;
     BLI_LINKS_APPEND(&shgroup->cmd, chunk);
   }
   else if (chunk->command_used == chunk->command_len) {
-    chunk = BLI_memblock_alloc(DST.vmempool->commands);
+    chunk = static_cast<DRWCommandChunk *>(BLI_memblock_alloc(DST.vmempool->commands));
     chunk->command_len = ARRAY_SIZE(chunk->commands);
     chunk->command_used = 0;
     memset(chunk->command_type, 0x0, sizeof(chunk->command_type));
     BLI_LINKS_APPEND(&shgroup->cmd, chunk);
   }

   command_type_set(chunk->command_type, chunk->command_used, type);

   return chunk->commands + chunk->command_used++;
 }

 static void drw_command_draw(DRWShadingGroup *shgroup, GPUBatch *batch, DRWResourceHandle handle)
 {
-  DRWCommandDraw *cmd = drw_command_create(shgroup, DRW_CMD_DRAW);
+  DRWCommandDraw *cmd = static_cast<DRWCommandDraw *>(drw_command_create(shgroup, DRW_CMD_DRAW));
   cmd->batch = batch;
   cmd->handle = handle;
 }

 static void drw_command_draw_range(
     DRWShadingGroup *shgroup, GPUBatch *batch, DRWResourceHandle handle, uint start, uint count)
 {
-  DRWCommandDrawRange *cmd = drw_command_create(shgroup, DRW_CMD_DRAW_RANGE);
+  DRWCommandDrawRange *cmd = static_cast<DRWCommandDrawRange *>(
+      drw_command_create(shgroup, DRW_CMD_DRAW_RANGE));
   cmd->batch = batch;
   cmd->handle = handle;
   cmd->vert_first = start;
   cmd->vert_count = count;
 }

 static void drw_command_draw_instance(
     DRWShadingGroup *shgroup, GPUBatch *batch, DRWResourceHandle handle, uint count, bool use_attr)
 {
-  DRWCommandDrawInstance *cmd = drw_command_create(shgroup, DRW_CMD_DRAW_INSTANCE);
+  DRWCommandDrawInstance *cmd = static_cast<DRWCommandDrawInstance *>(
+      drw_command_create(shgroup, DRW_CMD_DRAW_INSTANCE));
   cmd->batch = batch;
   cmd->handle = handle;
   cmd->inst_count = count;
   cmd->use_attrs = use_attr;
 }

 static void drw_command_draw_intance_range(
     DRWShadingGroup *shgroup, GPUBatch *batch, DRWResourceHandle handle, uint start, uint count)
 {
-  DRWCommandDrawInstanceRange *cmd = drw_command_create(shgroup, DRW_CMD_DRAW_INSTANCE_RANGE);
+  DRWCommandDrawInstanceRange *cmd = static_cast<DRWCommandDrawInstanceRange *>(
+      drw_command_create(shgroup, DRW_CMD_DRAW_INSTANCE_RANGE));
   cmd->batch = batch;
   cmd->handle = handle;
   cmd->inst_first = start;
   cmd->inst_count = count;
 }

 static void drw_command_compute(DRWShadingGroup *shgroup,
                                 int groups_x_len,
                                 int groups_y_len,
                                 int groups_z_len)
 {
-  DRWCommandCompute *cmd = drw_command_create(shgroup, DRW_CMD_COMPUTE);
+  DRWCommandCompute *cmd = static_cast<DRWCommandCompute *>(
+      drw_command_create(shgroup, DRW_CMD_COMPUTE));
   cmd->groups_x_len = groups_x_len;
   cmd->groups_y_len = groups_y_len;
   cmd->groups_z_len = groups_z_len;
 }

 static void drw_command_compute_ref(DRWShadingGroup *shgroup, int groups_ref[3])
 {
-  DRWCommandComputeRef *cmd = drw_command_create(shgroup, DRW_CMD_COMPUTE_REF);
+  DRWCommandComputeRef *cmd = static_cast<DRWCommandComputeRef *>(
+      drw_command_create(shgroup, DRW_CMD_COMPUTE_REF));
   cmd->groups_ref = groups_ref;
 }

 static void drw_command_compute_indirect(DRWShadingGroup *shgroup, GPUStorageBuf *indirect_buf)
 {
-  DRWCommandComputeIndirect *cmd = drw_command_create(shgroup, DRW_CMD_COMPUTE_INDIRECT);
+  DRWCommandComputeIndirect *cmd = static_cast<DRWCommandComputeIndirect *>(
+      drw_command_create(shgroup, DRW_CMD_COMPUTE_INDIRECT));
   cmd->indirect_buf = indirect_buf;
 }

 static void drw_command_barrier(DRWShadingGroup *shgroup, eGPUBarrier type)
 {
-  DRWCommandBarrier *cmd = drw_command_create(shgroup, DRW_CMD_BARRIER);
+  DRWCommandBarrier *cmd = static_cast<DRWCommandBarrier *>(
+      drw_command_create(shgroup, DRW_CMD_BARRIER));
   cmd->type = type;
 }

 static void drw_command_draw_procedural(DRWShadingGroup *shgroup,
                                         GPUBatch *batch,
                                         DRWResourceHandle handle,
                                         uint vert_count)
 {
-  DRWCommandDrawProcedural *cmd = drw_command_create(shgroup, DRW_CMD_DRAW_PROCEDURAL);
+  DRWCommandDrawProcedural *cmd = static_cast<DRWCommandDrawProcedural *>(
+      drw_command_create(shgroup, DRW_CMD_DRAW_PROCEDURAL));
   cmd->batch = batch;
   cmd->handle = handle;
   cmd->vert_count = vert_count;
 }

 static void drw_command_draw_indirect(DRWShadingGroup *shgroup,
                                       GPUBatch *batch,
                                       DRWResourceHandle handle,
                                       GPUStorageBuf *indirect_buf)
 {
-  DRWCommandDrawIndirect *cmd = drw_command_create(shgroup, DRW_CMD_DRAW_INDIRECT);
+  DRWCommandDrawIndirect *cmd = static_cast<DRWCommandDrawIndirect *>(
+      drw_command_create(shgroup, DRW_CMD_DRAW_INDIRECT));
   cmd->batch = batch;
   cmd->handle = handle;
   cmd->indirect_buf = indirect_buf;
 }

 static void drw_command_set_select_id(DRWShadingGroup *shgroup, GPUVertBuf *buf, uint select_id)
 {
   /* Only one can be valid. */
-  BLI_assert(buf == NULL || select_id == -1);
-  DRWCommandSetSelectID *cmd = drw_command_create(shgroup, DRW_CMD_SELECTID);
+  BLI_assert(buf == nullptr || select_id == -1);
+  DRWCommandSetSelectID *cmd = static_cast<DRWCommandSetSelectID *>(
+      drw_command_create(shgroup, DRW_CMD_SELECTID));
   cmd->select_buf = buf;
   cmd->select_id = select_id;
 }

 static void drw_command_set_stencil_mask(DRWShadingGroup *shgroup,
                                          uint write_mask,
                                          uint reference,
                                          uint compare_mask)
 {
   BLI_assert(write_mask <= 0xFF);
   BLI_assert(reference <= 0xFF);
   BLI_assert(compare_mask <= 0xFF);
-  DRWCommandSetStencil *cmd = drw_command_create(shgroup, DRW_CMD_STENCIL);
+  DRWCommandSetStencil *cmd = static_cast<DRWCommandSetStencil *>(
+      drw_command_create(shgroup, DRW_CMD_STENCIL));
   cmd->write_mask = write_mask;
   cmd->comp_mask = compare_mask;
   cmd->ref = reference;
 }

 static void drw_command_clear(DRWShadingGroup *shgroup,
                               eGPUFrameBufferBits channels,
                               uchar r,
                               uchar g,
                               uchar b,
                               uchar a,
                               float depth,
                               uchar stencil)
 {
-  DRWCommandClear *cmd = drw_command_create(shgroup, DRW_CMD_CLEAR);
+  DRWCommandClear *cmd = static_cast<DRWCommandClear *>(
+      drw_command_create(shgroup, DRW_CMD_CLEAR));
   cmd->clear_channels = channels;
   cmd->r = r;
   cmd->g = g;
   cmd->b = b;
   cmd->a = a;
   cmd->depth = depth;
   cmd->stencil = stencil;
 }

 static void drw_command_set_mutable_state(DRWShadingGroup *shgroup,
                                           DRWState enable,
                                           DRWState disable)
 {
   /* TODO: Restrict what state can be changed. */
-  DRWCommandSetMutableState *cmd = drw_command_create(shgroup, DRW_CMD_DRWSTATE);
+  DRWCommandSetMutableState *cmd = static_cast<DRWCommandSetMutableState *>(
+      drw_command_create(shgroup, DRW_CMD_DRWSTATE));
   cmd->enable = enable;
   cmd->disable = disable;
 }
| void DRW_shgroup_call_ex(DRWShadingGroup *shgroup, | void DRW_shgroup_call_ex(DRWShadingGroup *shgroup, | ||||
| Object *ob, | Object *ob, | ||||
| float (*obmat)[4], | float (*obmat)[4], | ||||
| struct GPUBatch *geom, | struct GPUBatch *geom, | ||||
| bool bypass_culling, | bool bypass_culling, | ||||
| void *user_data) | void *user_data) | ||||
| { | { | ||||
| BLI_assert(geom != NULL); | BLI_assert(geom != nullptr); | ||||
| if (G.f & G_FLAG_PICKSEL) { | if (G.f & G_FLAG_PICKSEL) { | ||||
| drw_command_set_select_id(shgroup, NULL, DST.select_id); | drw_command_set_select_id(shgroup, nullptr, DST.select_id); | ||||
| } | } | ||||
| DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->obmat : obmat, ob); | DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->obmat : obmat, ob); | ||||
| drw_command_draw(shgroup, geom, handle); | drw_command_draw(shgroup, geom, handle); | ||||
| /* Culling data. */ | /* Culling data. */ | ||||
| if (user_data || bypass_culling) { | if (user_data || bypass_culling) { | ||||
| DRWCullingState *culling = DRW_memblock_elem_from_handle(DST.vmempool->cullstates, | DRWCullingState *culling = static_cast<DRWCullingState *>( | ||||
| &DST.ob_handle); | DRW_memblock_elem_from_handle(DST.vmempool->cullstates, &DST.ob_handle)); | ||||
| if (user_data) { | if (user_data) { | ||||
| culling->user_data = user_data; | culling->user_data = user_data; | ||||
| } | } | ||||
| if (bypass_culling) { | if (bypass_culling) { | ||||
| /* NOTE: this will disable culling for the whole object. */ | /* NOTE: this will disable culling for the whole object. */ | ||||
| culling->bsphere.radius = -1.0f; | culling->bsphere.radius = -1.0f; | ||||
| } | } | ||||
| } | } | ||||
| } | } | ||||
| void DRW_shgroup_call_range( | void DRW_shgroup_call_range( | ||||
| DRWShadingGroup *shgroup, struct Object *ob, GPUBatch *geom, uint v_sta, uint v_num) | DRWShadingGroup *shgroup, struct Object *ob, GPUBatch *geom, uint v_sta, uint v_num) | ||||
| { | { | ||||
| BLI_assert(geom != NULL); | BLI_assert(geom != nullptr); | ||||
| if (G.f & G_FLAG_PICKSEL) { | if (G.f & G_FLAG_PICKSEL) { | ||||
| drw_command_set_select_id(shgroup, NULL, DST.select_id); | drw_command_set_select_id(shgroup, nullptr, DST.select_id); | ||||
| } | } | ||||
| DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->obmat : NULL, ob); | DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->obmat : nullptr, ob); | ||||
| drw_command_draw_range(shgroup, geom, handle, v_sta, v_num); | drw_command_draw_range(shgroup, geom, handle, v_sta, v_num); | ||||
| } | } | ||||
| void DRW_shgroup_call_instance_range( | void DRW_shgroup_call_instance_range( | ||||
| DRWShadingGroup *shgroup, Object *ob, struct GPUBatch *geom, uint i_sta, uint i_num) | DRWShadingGroup *shgroup, Object *ob, struct GPUBatch *geom, uint i_sta, uint i_num) | ||||
| { | { | ||||
| BLI_assert(geom != NULL); | BLI_assert(geom != nullptr); | ||||
| if (G.f & G_FLAG_PICKSEL) { | if (G.f & G_FLAG_PICKSEL) { | ||||
| drw_command_set_select_id(shgroup, NULL, DST.select_id); | drw_command_set_select_id(shgroup, nullptr, DST.select_id); | ||||
| } | } | ||||
| DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->obmat : NULL, ob); | DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->obmat : nullptr, ob); | ||||
| drw_command_draw_intance_range(shgroup, geom, handle, i_sta, i_num); | drw_command_draw_intance_range(shgroup, geom, handle, i_sta, i_num); | ||||
| } | } | ||||
| void DRW_shgroup_call_compute(DRWShadingGroup *shgroup, | void DRW_shgroup_call_compute(DRWShadingGroup *shgroup, | ||||
| int groups_x_len, | int groups_x_len, | ||||
| int groups_y_len, | int groups_y_len, | ||||
| int groups_z_len) | int groups_z_len) | ||||
| { | { | ||||
| Show All 25 Lines | |||||
| } | } | ||||
| static void drw_shgroup_call_procedural_add_ex(DRWShadingGroup *shgroup, | static void drw_shgroup_call_procedural_add_ex(DRWShadingGroup *shgroup, | ||||
| GPUBatch *geom, | GPUBatch *geom, | ||||
| Object *ob, | Object *ob, | ||||
| uint vert_count) | uint vert_count) | ||||
| { | { | ||||
| BLI_assert(vert_count > 0); | BLI_assert(vert_count > 0); | ||||
| BLI_assert(geom != NULL); | BLI_assert(geom != nullptr); | ||||
| if (G.f & G_FLAG_PICKSEL) { | if (G.f & G_FLAG_PICKSEL) { | ||||
| drw_command_set_select_id(shgroup, NULL, DST.select_id); | drw_command_set_select_id(shgroup, nullptr, DST.select_id); | ||||
| } | } | ||||
| DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->obmat : NULL, ob); | DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->obmat : nullptr, ob); | ||||
| drw_command_draw_procedural(shgroup, geom, handle, vert_count); | drw_command_draw_procedural(shgroup, geom, handle, vert_count); | ||||
| } | } | ||||
| void DRW_shgroup_call_procedural_points(DRWShadingGroup *shgroup, Object *ob, uint point_count) | void DRW_shgroup_call_procedural_points(DRWShadingGroup *shgroup, Object *ob, uint point_count) | ||||
| { | { | ||||
| struct GPUBatch *geom = drw_cache_procedural_points_get(); | struct GPUBatch *geom = drw_cache_procedural_points_get(); | ||||
| drw_shgroup_call_procedural_add_ex(shgroup, geom, ob, point_count); | drw_shgroup_call_procedural_add_ex(shgroup, geom, ob, point_count); | ||||
| } | } | ||||
| void DRW_shgroup_call_procedural_triangles(DRWShadingGroup *shgroup, Object *ob, uint tri_count) | void DRW_shgroup_call_procedural_triangles(DRWShadingGroup *shgroup, Object *ob, uint tri_count) | ||||
| drw_shgroup_call_procedural_add_ex(shgroup, geom, ob, tri_count * 3); | drw_shgroup_call_procedural_add_ex(shgroup, geom, ob, tri_count * 3); | ||||
| } | } | ||||
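The procedural variants bind an internal dummy batch, so callers pass only a primitive count and generate positions in the vertex shader (typically from `gl_VertexID`). A sketch with illustrative counts, reusing the `shgrp`/`ob` names from above:

DRW_shgroup_call_procedural_points(shgrp, ob, 1024);    /* 1024 vertices. */
DRW_shgroup_call_procedural_lines(shgrp, ob, 512);      /* 512 * 2 vertices. */
DRW_shgroup_call_procedural_triangles(shgrp, ob, 256);  /* 256 * 3 vertices. */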
| void DRW_shgroup_call_procedural_indirect(DRWShadingGroup *shgroup, | void DRW_shgroup_call_procedural_indirect(DRWShadingGroup *shgroup, | ||||
| GPUPrimType primitive_type, | GPUPrimType primitive_type, | ||||
| Object *ob, | Object *ob, | ||||
| GPUStorageBuf *indirect_buf) | GPUStorageBuf *indirect_buf) | ||||
| { | { | ||||
| struct GPUBatch *geom = NULL; | struct GPUBatch *geom = nullptr; | ||||
| switch (primitive_type) { | switch (primitive_type) { | ||||
| case GPU_PRIM_POINTS: | case GPU_PRIM_POINTS: | ||||
| geom = drw_cache_procedural_points_get(); | geom = drw_cache_procedural_points_get(); | ||||
| break; | break; | ||||
| case GPU_PRIM_LINES: | case GPU_PRIM_LINES: | ||||
| geom = drw_cache_procedural_lines_get(); | geom = drw_cache_procedural_lines_get(); | ||||
| break; | break; | ||||
| case GPU_PRIM_TRIS: | case GPU_PRIM_TRIS: | ||||
| geom = drw_cache_procedural_triangles_get(); | geom = drw_cache_procedural_triangles_get(); | ||||
| break; | break; | ||||
| case GPU_PRIM_TRI_STRIP: | case GPU_PRIM_TRI_STRIP: | ||||
| geom = drw_cache_procedural_triangle_strips_get(); | geom = drw_cache_procedural_triangle_strips_get(); | ||||
| break; | break; | ||||
| default: | default: | ||||
| BLI_assert_msg(0, | BLI_assert_msg(0, | ||||
| "Unsupported primitive type in DRW_shgroup_call_procedural_indirect. Add new " | "Unsupported primitive type in DRW_shgroup_call_procedural_indirect. Add new " | ||||
| "one as needed."); | "one as needed."); | ||||
| break; | break; | ||||
| } | } | ||||
| if (G.f & G_FLAG_PICKSEL) { | if (G.f & G_FLAG_PICKSEL) { | ||||
| drw_command_set_select_id(shgroup, NULL, DST.select_id); | drw_command_set_select_id(shgroup, nullptr, DST.select_id); | ||||
| } | } | ||||
| DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->obmat : NULL, ob); | DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->obmat : nullptr, ob); | ||||
| drw_command_draw_indirect(shgroup, geom, handle, indirect_buf); | drw_command_draw_indirect(shgroup, geom, handle, indirect_buf); | ||||
| } | } | ||||
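Sketch of the indirect path. The storage buffer's contents are an assumption here: a GL-style draw-arrays-indirect command, typically written by a culling or compaction compute pass before this call is recorded.

/* `indirect_buf` is assumed to hold a GL-style indirect command
 * (not defined in this file):
 *   uint vertex_len, instance_len, vertex_first, base_instance; */
DRW_shgroup_call_procedural_indirect(shgrp, GPU_PRIM_TRIS, ob, indirect_buf);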
| void DRW_shgroup_call_instances(DRWShadingGroup *shgroup, | void DRW_shgroup_call_instances(DRWShadingGroup *shgroup, | ||||
| Object *ob, | Object *ob, | ||||
| struct GPUBatch *geom, | struct GPUBatch *geom, | ||||
| uint count) | uint count) | ||||
| { | { | ||||
| BLI_assert(geom != NULL); | BLI_assert(geom != nullptr); | ||||
| if (G.f & G_FLAG_PICKSEL) { | if (G.f & G_FLAG_PICKSEL) { | ||||
| drw_command_set_select_id(shgroup, NULL, DST.select_id); | drw_command_set_select_id(shgroup, nullptr, DST.select_id); | ||||
| } | } | ||||
| DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->obmat : NULL, ob); | DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->obmat : nullptr, ob); | ||||
| drw_command_draw_instance(shgroup, geom, handle, count, false); | drw_command_draw_instance(shgroup, geom, handle, count, false); | ||||
| } | } | ||||
| void DRW_shgroup_call_instances_with_attrs(DRWShadingGroup *shgroup, | void DRW_shgroup_call_instances_with_attrs(DRWShadingGroup *shgroup, | ||||
| Object *ob, | Object *ob, | ||||
| struct GPUBatch *geom, | struct GPUBatch *geom, | ||||
| struct GPUBatch *inst_attributes) | struct GPUBatch *inst_attributes) | ||||
| { | { | ||||
| BLI_assert(geom != NULL); | BLI_assert(geom != nullptr); | ||||
| BLI_assert(inst_attributes != NULL); | BLI_assert(inst_attributes != nullptr); | ||||
| if (G.f & G_FLAG_PICKSEL) { | if (G.f & G_FLAG_PICKSEL) { | ||||
| drw_command_set_select_id(shgroup, NULL, DST.select_id); | drw_command_set_select_id(shgroup, nullptr, DST.select_id); | ||||
| } | } | ||||
| DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->obmat : NULL, ob); | DRWResourceHandle handle = drw_resource_handle(shgroup, ob ? ob->obmat : nullptr, ob); | ||||
| GPUBatch *batch = DRW_temp_batch_instance_request( | GPUBatch *batch = DRW_temp_batch_instance_request( | ||||
| DST.vmempool->idatalist, NULL, inst_attributes, geom); | DST.vmempool->idatalist, nullptr, inst_attributes, geom); | ||||
| drw_command_draw_instance(shgroup, batch, handle, 0, true); | drw_command_draw_instance(shgroup, batch, handle, 0, true); | ||||
| } | } | ||||
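The two instancing entry points compared; a sketch assuming `inst_attrs` is a `GPUBatch` whose vertex buffers hold one element per instance:

/* Fixed instance count; the shader derives per-instance data itself. */
DRW_shgroup_call_instances(shgrp, ob, geom, 128);

/* Instance count and per-instance attributes taken from `inst_attrs`,
 * bound onto `geom` through a temporary instancing batch. */
DRW_shgroup_call_instances_with_attrs(shgrp, ob, geom, inst_attrs);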
| #define SCULPT_DEBUG_BUFFERS (G.debug_value == 889) | #define SCULPT_DEBUG_BUFFERS (G.debug_value == 889) | ||||
| typedef struct DRWSculptCallbackData { | typedef struct DRWSculptCallbackData { | ||||
| Object *ob; | Object *ob; | ||||
| DRWShadingGroup **shading_groups; | DRWShadingGroup **shading_groups; | ||||
| int num_shading_groups; | int num_shading_groups; | ||||
| bool use_wire; | bool use_wire; | ||||
| bool use_mats; | bool use_mats; | ||||
| bool use_mask; | bool use_mask; | ||||
| bool use_fsets; | bool use_fsets; | ||||
| bool fast_mode; /* Set by draw manager. Do not init. */ | bool fast_mode; /* Set by draw manager. Do not init. */ | ||||
| int debug_node_nr; | int debug_node_nr; | ||||
| PBVHAttrReq *attrs; | |||||
| int attrs_num; | |||||
| } DRWSculptCallbackData; | } DRWSculptCallbackData; | ||||
| #define SCULPT_DEBUG_COLOR(id) (sculpt_debug_colors[id % 9]) | #define SCULPT_DEBUG_COLOR(id) (sculpt_debug_colors[id % 9]) | ||||
| static float sculpt_debug_colors[9][4] = { | static float sculpt_debug_colors[9][4] = { | ||||
| {1.0f, 0.2f, 0.2f, 1.0f}, | {1.0f, 0.2f, 0.2f, 1.0f}, | ||||
| {0.2f, 1.0f, 0.2f, 1.0f}, | {0.2f, 1.0f, 0.2f, 1.0f}, | ||||
| {0.2f, 0.2f, 1.0f, 1.0f}, | {0.2f, 0.2f, 1.0f, 1.0f}, | ||||
| {1.0f, 1.0f, 0.2f, 1.0f}, | {1.0f, 1.0f, 0.2f, 1.0f}, | ||||
| {0.2f, 1.0f, 1.0f, 1.0f}, | {0.2f, 1.0f, 1.0f, 1.0f}, | ||||
| {1.0f, 0.2f, 1.0f, 1.0f}, | {1.0f, 0.2f, 1.0f, 1.0f}, | ||||
| {1.0f, 0.7f, 0.2f, 1.0f}, | {1.0f, 0.7f, 0.2f, 1.0f}, | ||||
| {0.2f, 1.0f, 0.7f, 1.0f}, | {0.2f, 1.0f, 0.7f, 1.0f}, | ||||
| {0.7f, 0.2f, 1.0f, 1.0f}, | {0.7f, 0.2f, 1.0f, 1.0f}, | ||||
| }; | }; | ||||
| static void sculpt_draw_cb(DRWSculptCallbackData *scd, GPU_PBVH_Buffers *buffers) | static void sculpt_draw_cb(DRWSculptCallbackData *scd, | ||||
| PBVHBatches *batches, | |||||
| PBVH_GPU_Args *pbvh_draw_args) | |||||
| { | { | ||||
| if (!buffers) { | if (!batches) { | ||||
| return; | return; | ||||
| } | } | ||||
| /* Meh... use_mask is a bit misleading here. */ | int primcount; | ||||
| if (scd->use_mask && !GPU_pbvh_buffers_has_overlays(buffers)) { | GPUBatch *geom; | ||||
| return; | |||||
| if (!scd->use_wire) { | |||||
| geom = DRW_pbvh_tris_get(batches, scd->attrs, scd->attrs_num, pbvh_draw_args, &primcount); | |||||
| } | |||||
| else { | |||||
| geom = DRW_pbvh_lines_get(batches, scd->attrs, scd->attrs_num, pbvh_draw_args, &primcount); | |||||
| } | } | ||||
| GPUBatch *geom = GPU_pbvh_buffers_batch_get(buffers, scd->fast_mode, scd->use_wire); | |||||
| short index = 0; | short index = 0; | ||||
| if (scd->use_mats) { | if (scd->use_mats) { | ||||
| index = GPU_pbvh_buffers_material_index_get(buffers); | index = drw_pbvh_material_index_get(batches); | ||||
| if (index >= scd->num_shading_groups) { | if (index >= scd->num_shading_groups) { | ||||
| index = 0; | index = 0; | ||||
| } | } | ||||
| } | } | ||||
| DRWShadingGroup *shgrp = scd->shading_groups[index]; | DRWShadingGroup *shgrp = scd->shading_groups[index]; | ||||
| if (geom != NULL && shgrp != NULL) { | if (geom != nullptr && shgrp != nullptr) { | ||||
| if (SCULPT_DEBUG_BUFFERS) { | if (SCULPT_DEBUG_BUFFERS) { | ||||
| /* Color each buffer in a different color. Only works in solid/X-ray mode. */ | /* Color each buffer in a different color. Only works in solid/X-ray mode. */ | ||||
| shgrp = DRW_shgroup_create_sub(shgrp); | shgrp = DRW_shgroup_create_sub(shgrp); | ||||
| DRW_shgroup_uniform_vec3( | DRW_shgroup_uniform_vec3( | ||||
| shgrp, "materialDiffuseColor", SCULPT_DEBUG_COLOR(scd->debug_node_nr++), 1); | shgrp, "materialDiffuseColor", SCULPT_DEBUG_COLOR(scd->debug_node_nr++), 1); | ||||
| } | } | ||||
| /* DRW_shgroup_call_no_cull reuses matrix calculations for all the drawcalls of this | /* DRW_shgroup_call_no_cull reuses matrix calculations for all the drawcalls of this | ||||
| Show All 39 Lines | static void drw_sculpt_get_frustum_planes(Object *ob, float planes[6][4]) | ||||
| for (int i = 0; i < 6; i++) { | for (int i = 0; i < 6; i++) { | ||||
| mul_m4_v4(tmat, planes[i]); | mul_m4_v4(tmat, planes[i]); | ||||
| } | } | ||||
| } | } | ||||
| static void drw_sculpt_generate_calls(DRWSculptCallbackData *scd) | static void drw_sculpt_generate_calls(DRWSculptCallbackData *scd) | ||||
| { | { | ||||
| /* PBVH should always exist for non-empty meshes, created by depsgraph eval. */ | /* PBVH should always exist for non-empty meshes, created by depsgraph eval. */ | ||||
| PBVH *pbvh = (scd->ob->sculpt) ? scd->ob->sculpt->pbvh : NULL; | PBVH *pbvh = (scd->ob->sculpt) ? scd->ob->sculpt->pbvh : nullptr; | ||||
| if (!pbvh) { | if (!pbvh) { | ||||
| return; | return; | ||||
| } | } | ||||
| const DRWContextState *drwctx = DRW_context_state_get(); | const DRWContextState *drwctx = DRW_context_state_get(); | ||||
| RegionView3D *rv3d = drwctx->rv3d; | RegionView3D *rv3d = drwctx->rv3d; | ||||
| const bool navigating = rv3d && (rv3d->rflag & RV3D_NAVIGATING); | const bool navigating = rv3d && (rv3d->rflag & RV3D_NAVIGATING); | ||||
| Paint *p = NULL; | Paint *p = nullptr; | ||||
| if (drwctx->evil_C != NULL) { | if (drwctx->evil_C != nullptr) { | ||||
| p = BKE_paint_get_active_from_context(drwctx->evil_C); | p = BKE_paint_get_active_from_context(drwctx->evil_C); | ||||
| } | } | ||||
| /* Frustum planes to show only visible PBVH nodes. */ | /* Frustum planes to show only visible PBVH nodes. */ | ||||
| float update_planes[6][4]; | float update_planes[6][4]; | ||||
| float draw_planes[6][4]; | float draw_planes[6][4]; | ||||
| PBVHFrustumPlanes update_frustum; | PBVHFrustumPlanes update_frustum; | ||||
| PBVHFrustumPlanes draw_frustum; | PBVHFrustumPlanes draw_frustum; | ||||
| static void drw_sculpt_generate_calls(DRWSculptCallbackData *scd) | static void drw_sculpt_generate_calls(DRWSculptCallbackData *scd) | ||||
| /* Update draw buffers only for visible nodes while painting. | /* Update draw buffers only for visible nodes while painting. | ||||
| * But do update them otherwise so navigating stays smooth. */ | * But do update them otherwise so navigating stays smooth. */ | ||||
| bool update_only_visible = rv3d && !(rv3d->rflag & RV3D_PAINTING); | bool update_only_visible = rv3d && !(rv3d->rflag & RV3D_PAINTING); | ||||
| if (p && (p->flags & PAINT_SCULPT_DELAY_UPDATES)) { | if (p && (p->flags & PAINT_SCULPT_DELAY_UPDATES)) { | ||||
| update_only_visible = true; | update_only_visible = true; | ||||
| } | } | ||||
| Mesh *mesh = scd->ob->data; | Mesh *mesh = static_cast<Mesh *>(scd->ob->data); | ||||
| BKE_pbvh_update_normals(pbvh, mesh->runtime.subdiv_ccg); | BKE_pbvh_update_normals(pbvh, mesh->runtime.subdiv_ccg); | ||||
| BKE_pbvh_draw_cb(pbvh, | BKE_pbvh_draw_cb(pbvh, | ||||
| update_only_visible, | update_only_visible, | ||||
| &update_frustum, | &update_frustum, | ||||
| &draw_frustum, | &draw_frustum, | ||||
| (void (*)(void *, GPU_PBVH_Buffers *))sculpt_draw_cb, | (void (*)(void *, PBVHBatches *, PBVH_GPU_Args *))sculpt_draw_cb, | ||||
| scd, | scd, | ||||
| scd->use_mats); | scd->use_mats, | ||||
| scd->attrs, | |||||
| scd->attrs_num); | |||||
| if (SCULPT_DEBUG_BUFFERS) { | if (SCULPT_DEBUG_BUFFERS) { | ||||
| int debug_node_nr = 0; | int debug_node_nr = 0; | ||||
| DRW_debug_modelmat(scd->ob->obmat); | DRW_debug_modelmat(scd->ob->obmat); | ||||
| BKE_pbvh_draw_debug_cb( | BKE_pbvh_draw_debug_cb( | ||||
| pbvh, | pbvh, | ||||
| (void (*)(PBVHNode * n, void *d, const float min[3], const float max[3], PBVHNodeFlags f)) | (void (*)(PBVHNode * n, void *d, const float min[3], const float max[3], PBVHNodeFlags f)) | ||||
| sculpt_debug_cb, | sculpt_debug_cb, | ||||
| &debug_node_nr); | &debug_node_nr); | ||||
| } | } | ||||
| } | } | ||||
| void DRW_shgroup_call_sculpt(DRWShadingGroup *shgroup, Object *ob, bool use_wire, bool use_mask) | void DRW_shgroup_call_sculpt(DRWShadingGroup *shgroup, | ||||
| Object *ob, | |||||
| bool use_wire, | |||||
| bool use_mask, | |||||
| bool use_fset, | |||||
| bool use_color, | |||||
| bool use_uv) | |||||
| { | { | ||||
| DRWSculptCallbackData scd = { | DRWSculptCallbackData scd{}; | ||||
| .ob = ob, | scd.ob = ob; | ||||
| .shading_groups = &shgroup, | scd.shading_groups = &shgroup; | ||||
| .num_shading_groups = 1, | scd.num_shading_groups = 1; | ||||
| .use_wire = use_wire, | scd.use_wire = use_wire; | ||||
| .use_mats = false, | scd.use_mats = false; | ||||
| .use_mask = use_mask, | scd.use_mask = use_mask; | ||||
| }; | |||||
| PBVHAttrReq attrs[16]; | |||||
| int attrs_num = 0; | |||||
| memset(attrs, 0, sizeof(attrs)); | |||||
| /* NOTE: these are NOT #eCustomDataType, they are extended values; ASAN may warn about this. */ | |||||
| attrs[attrs_num++].type = (eCustomDataType)CD_PBVH_CO_TYPE; | |||||
| attrs[attrs_num++].type = (eCustomDataType)CD_PBVH_NO_TYPE; | |||||
| if (use_mask) { | |||||
| attrs[attrs_num++].type = (eCustomDataType)CD_PBVH_MASK_TYPE; | |||||
| } | |||||
| if (use_fset) { | |||||
| attrs[attrs_num++].type = (eCustomDataType)CD_PBVH_FSET_TYPE; | |||||
| } | |||||
| Mesh *me = BKE_object_get_original_mesh(ob); | |||||
| if (use_color) { | |||||
| CustomDataLayer *layer = BKE_id_attributes_active_color_get(&me->id); | |||||
| if (layer) { | |||||
| eAttrDomain domain = BKE_id_attribute_domain(&me->id, layer); | |||||
| attrs[attrs_num].type = eCustomDataType(layer->type); | |||||
| attrs[attrs_num].domain = domain; | |||||
| BLI_strncpy(attrs[attrs_num].name, layer->name, sizeof(attrs[attrs_num].name)); | |||||
| attrs_num++; | |||||
| } | |||||
| } | |||||
| if (use_uv) { | |||||
| int layer_i = CustomData_get_active_layer_index(&me->ldata, CD_MLOOPUV); | |||||
| if (layer_i != -1) { | |||||
| CustomDataLayer *layer = me->ldata.layers + layer_i; | |||||
| attrs[attrs_num].type = CD_MLOOPUV; | |||||
| attrs[attrs_num].domain = ATTR_DOMAIN_CORNER; | |||||
| BLI_strncpy(attrs[attrs_num].name, layer->name, sizeof(attrs[attrs_num].name)); | |||||
| attrs_num++; | |||||
| } | |||||
| } | |||||
| scd.attrs = attrs; | |||||
| scd.attrs_num = attrs_num; | |||||
| drw_sculpt_generate_calls(&scd); | drw_sculpt_generate_calls(&scd); | ||||
| } | } | ||||
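Call-site sketch for the extended signature, e.g. a solid-mode engine that wants mask and face-set overlays but no color or UV attributes (`shgrp` and `ob` illustrative as before):

DRW_shgroup_call_sculpt(shgrp,
                        ob,
                        false, /* use_wire */
                        true,  /* use_mask */
                        true,  /* use_fset */
                        false, /* use_color */
                        false  /* use_uv */);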
| void DRW_shgroup_call_sculpt_with_materials(DRWShadingGroup **shgroups, | void DRW_shgroup_call_sculpt_with_materials(DRWShadingGroup **shgroups, | ||||
| GPUMaterial **gpumats, | |||||
| int num_shgroups, | int num_shgroups, | ||||
| Object *ob) | Object *ob) | ||||
| { | { | ||||
| DRWSculptCallbackData scd = { | DRW_Attributes draw_attrs; | ||||
| .ob = ob, | DRW_MeshCDMask cd_needed; | ||||
| .shading_groups = shgroups, | |||||
| .num_shading_groups = num_shgroups, | if (gpumats) { | ||||
| .use_wire = false, | DRW_mesh_get_attributes(ob, (Mesh *)ob->data, gpumats, num_shgroups, &draw_attrs, &cd_needed); | ||||
| .use_mats = true, | } | ||||
| .use_mask = false, | else { | ||||
| }; | memset(&draw_attrs, 0, sizeof(draw_attrs)); | ||||
| memset(&cd_needed, 0, sizeof(cd_needed)); | |||||
| } | |||||
| int attrs_num = 2 + draw_attrs.num_requests; | |||||
| /* UV maps are not in attribute requests. */ | |||||
| attrs_num += count_bits_i(cd_needed.uv); | |||||
| blender::Array<PBVHAttrReq, 16> attrs(attrs_num, PBVHAttrReq{}); | |||||
| int attrs_i = 0; | |||||
| /* NOTE: these are NOT #eCustomDataType, they are extended values; ASAN may warn about this. */ | |||||
| attrs[attrs_i++].type = (eCustomDataType)CD_PBVH_CO_TYPE; | |||||
| attrs[attrs_i++].type = (eCustomDataType)CD_PBVH_NO_TYPE; | |||||
| for (int i = 0; i < draw_attrs.num_requests; i++) { | |||||
| DRW_AttributeRequest *req = draw_attrs.requests + i; | |||||
| attrs[attrs_i].type = req->cd_type; | |||||
| attrs[attrs_i].domain = req->domain; | |||||
| BLI_strncpy(attrs[attrs_i].name, req->attribute_name, sizeof(PBVHAttrReq::name)); | |||||
| attrs_i++; | |||||
| } | |||||
| /* UV maps are not in attribute requests. */ | |||||
| Mesh *me = (Mesh *)ob->data; | |||||
| for (uint i = 0; i < 32; i++) { | |||||
| if (cd_needed.uv & (1 << i)) { | |||||
| int layer_i = CustomData_get_layer_index_n(&me->ldata, CD_MLOOPUV, i); | |||||
| CustomDataLayer *layer = layer_i != -1 ? me->ldata.layers + layer_i : nullptr; | |||||
| if (layer) { | |||||
| attrs[attrs_i].type = CD_MLOOPUV; | |||||
| attrs[attrs_i].domain = ATTR_DOMAIN_CORNER; | |||||
| BLI_strncpy(attrs[attrs_i].name, layer->name, sizeof(PBVHAttrReq::name)); | |||||
| attrs_i++; | |||||
| } | |||||
| } | |||||
| } | |||||
| attrs_num = attrs_i; | |||||
| DRWSculptCallbackData scd{}; | |||||
| scd.ob = ob; | |||||
| scd.shading_groups = shgroups; | |||||
| scd.num_shading_groups = num_shgroups; | |||||
| scd.use_wire = false; | |||||
| scd.use_mats = true; | |||||
| scd.use_mask = false; | |||||
| scd.attrs = attrs.data(); | |||||
| scd.attrs_num = attrs_num; | |||||
| drw_sculpt_generate_calls(&scd); | drw_sculpt_generate_calls(&scd); | ||||
| } | } | ||||
| static GPUVertFormat inst_select_format = {0}; | static GPUVertFormat inst_select_format = {0}; | ||||
| DRWCallBuffer *DRW_shgroup_call_buffer(DRWShadingGroup *shgroup, | DRWCallBuffer *DRW_shgroup_call_buffer(DRWShadingGroup *shgroup, | ||||
| struct GPUVertFormat *format, | struct GPUVertFormat *format, | ||||
| GPUPrimType prim_type) | GPUPrimType prim_type) | ||||
| { | { | ||||
| BLI_assert(ELEM(prim_type, GPU_PRIM_POINTS, GPU_PRIM_LINES, GPU_PRIM_TRI_FAN)); | BLI_assert(ELEM(prim_type, GPU_PRIM_POINTS, GPU_PRIM_LINES, GPU_PRIM_TRI_FAN)); | ||||
| BLI_assert(format != NULL); | BLI_assert(format != nullptr); | ||||
| DRWCallBuffer *callbuf = BLI_memblock_alloc(DST.vmempool->callbuffers); | DRWCallBuffer *callbuf = static_cast<DRWCallBuffer *>( | ||||
| BLI_memblock_alloc(DST.vmempool->callbuffers)); | |||||
| callbuf->buf = DRW_temp_buffer_request(DST.vmempool->idatalist, format, &callbuf->count); | callbuf->buf = DRW_temp_buffer_request(DST.vmempool->idatalist, format, &callbuf->count); | ||||
| callbuf->buf_select = NULL; | callbuf->buf_select = nullptr; | ||||
| callbuf->count = 0; | callbuf->count = 0; | ||||
| if (G.f & G_FLAG_PICKSEL) { | if (G.f & G_FLAG_PICKSEL) { | ||||
| /* Not actually used for rendering but alloced in one chunk. */ | /* Not actually used for rendering but allocated in one chunk. */ | ||||
| if (inst_select_format.attr_len == 0) { | if (inst_select_format.attr_len == 0) { | ||||
| GPU_vertformat_attr_add(&inst_select_format, "selectId", GPU_COMP_I32, 1, GPU_FETCH_INT); | GPU_vertformat_attr_add(&inst_select_format, "selectId", GPU_COMP_I32, 1, GPU_FETCH_INT); | ||||
| } | } | ||||
| callbuf->buf_select = DRW_temp_buffer_request( | callbuf->buf_select = DRW_temp_buffer_request( | ||||
| DST.vmempool->idatalist, &inst_select_format, &callbuf->count); | DST.vmempool->idatalist, &inst_select_format, &callbuf->count); | ||||
| drw_command_set_select_id(shgroup, callbuf->buf_select, -1); | drw_command_set_select_id(shgroup, callbuf->buf_select, -1); | ||||
| } | } | ||||
| DRWResourceHandle handle = drw_resource_handle(shgroup, NULL, NULL); | DRWResourceHandle handle = drw_resource_handle(shgroup, nullptr, nullptr); | ||||
| GPUBatch *batch = DRW_temp_batch_request(DST.vmempool->idatalist, callbuf->buf, prim_type); | GPUBatch *batch = DRW_temp_batch_request(DST.vmempool->idatalist, callbuf->buf, prim_type); | ||||
| drw_command_draw(shgroup, batch, handle); | drw_command_draw(shgroup, batch, handle); | ||||
| return callbuf; | return callbuf; | ||||
| } | } | ||||
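Usage sketch for the call-buffer path. The struct layout and attribute names are illustrative and must match the vertex format, which is built here with `DRW_shgroup_instance_format_array` (defined later in this file):

struct PointData {
  float pos[3];
  float color[4];
};
static GPUVertFormat *format = nullptr;
if (format == nullptr) {
  DRWInstanceAttrFormat attrs[2] = {
      {"pos", DRW_ATTR_FLOAT, 3},
      {"color", DRW_ATTR_FLOAT, 4},
  };
  format = DRW_shgroup_instance_format_array(attrs, 2);
}
DRWCallBuffer *callbuf = DRW_shgroup_call_buffer(shgrp, format, GPU_PRIM_POINTS);

PointData pt = {{0.0f, 0.0f, 0.0f}, {1.0f, 0.5f, 0.0f, 1.0f}};
DRW_buffer_add_entry_struct(callbuf, &pt); /* One point per call. */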
| DRWCallBuffer *DRW_shgroup_call_buffer_instance(DRWShadingGroup *shgroup, | DRWCallBuffer *DRW_shgroup_call_buffer_instance(DRWShadingGroup *shgroup, | ||||
| struct GPUVertFormat *format, | struct GPUVertFormat *format, | ||||
| GPUBatch *geom) | GPUBatch *geom) | ||||
| { | { | ||||
| BLI_assert(geom != NULL); | BLI_assert(geom != nullptr); | ||||
| BLI_assert(format != NULL); | BLI_assert(format != nullptr); | ||||
| DRWCallBuffer *callbuf = BLI_memblock_alloc(DST.vmempool->callbuffers); | DRWCallBuffer *callbuf = static_cast<DRWCallBuffer *>( | ||||
| BLI_memblock_alloc(DST.vmempool->callbuffers)); | |||||
| callbuf->buf = DRW_temp_buffer_request(DST.vmempool->idatalist, format, &callbuf->count); | callbuf->buf = DRW_temp_buffer_request(DST.vmempool->idatalist, format, &callbuf->count); | ||||
| callbuf->buf_select = NULL; | callbuf->buf_select = nullptr; | ||||
| callbuf->count = 0; | callbuf->count = 0; | ||||
| if (G.f & G_FLAG_PICKSEL) { | if (G.f & G_FLAG_PICKSEL) { | ||||
| /* Not actually used for rendering but alloced in one chunk. */ | /* Not actually used for rendering but allocated in one chunk. */ | ||||
| if (inst_select_format.attr_len == 0) { | if (inst_select_format.attr_len == 0) { | ||||
| GPU_vertformat_attr_add(&inst_select_format, "selectId", GPU_COMP_I32, 1, GPU_FETCH_INT); | GPU_vertformat_attr_add(&inst_select_format, "selectId", GPU_COMP_I32, 1, GPU_FETCH_INT); | ||||
| } | } | ||||
| callbuf->buf_select = DRW_temp_buffer_request( | callbuf->buf_select = DRW_temp_buffer_request( | ||||
| DST.vmempool->idatalist, &inst_select_format, &callbuf->count); | DST.vmempool->idatalist, &inst_select_format, &callbuf->count); | ||||
| drw_command_set_select_id(shgroup, callbuf->buf_select, -1); | drw_command_set_select_id(shgroup, callbuf->buf_select, -1); | ||||
| } | } | ||||
| DRWResourceHandle handle = drw_resource_handle(shgroup, NULL, NULL); | DRWResourceHandle handle = drw_resource_handle(shgroup, nullptr, nullptr); | ||||
| GPUBatch *batch = DRW_temp_batch_instance_request( | GPUBatch *batch = DRW_temp_batch_instance_request( | ||||
| DST.vmempool->idatalist, callbuf->buf, NULL, geom); | DST.vmempool->idatalist, callbuf->buf, nullptr, geom); | ||||
| drw_command_draw(shgroup, batch, handle); | drw_command_draw(shgroup, batch, handle); | ||||
| return callbuf; | return callbuf; | ||||
| } | } | ||||
| void DRW_buffer_add_entry_struct(DRWCallBuffer *callbuf, const void *data) | void DRW_buffer_add_entry_struct(DRWCallBuffer *callbuf, const void *data) | ||||
| { | { | ||||
| GPUVertBuf *buf = callbuf->buf; | GPUVertBuf *buf = callbuf->buf; | ||||
| /** \} */ | /** \} */ | ||||
| /* -------------------------------------------------------------------- */ | /* -------------------------------------------------------------------- */ | ||||
| /** \name Shading Groups (DRW_shgroup) | /** \name Shading Groups (DRW_shgroup) | ||||
| * \{ */ | * \{ */ | ||||
| static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader) | static void drw_shgroup_init(DRWShadingGroup *shgroup, GPUShader *shader) | ||||
| { | { | ||||
| shgroup->uniforms = NULL; | shgroup->uniforms = nullptr; | ||||
| shgroup->uniform_attrs = NULL; | shgroup->uniform_attrs = nullptr; | ||||
| int view_ubo_location = GPU_shader_get_builtin_block(shader, GPU_UNIFORM_BLOCK_VIEW); | int view_ubo_location = GPU_shader_get_builtin_block(shader, GPU_UNIFORM_BLOCK_VIEW); | ||||
| int model_ubo_location = GPU_shader_get_builtin_block(shader, GPU_UNIFORM_BLOCK_MODEL); | int model_ubo_location = GPU_shader_get_builtin_block(shader, GPU_UNIFORM_BLOCK_MODEL); | ||||
| int info_ubo_location = GPU_shader_get_builtin_block(shader, GPU_UNIFORM_BLOCK_INFO); | int info_ubo_location = GPU_shader_get_builtin_block(shader, GPU_UNIFORM_BLOCK_INFO); | ||||
| int baseinst_location = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_BASE_INSTANCE); | int baseinst_location = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_BASE_INSTANCE); | ||||
| int chunkid_location = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_RESOURCE_CHUNK); | int chunkid_location = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_RESOURCE_CHUNK); | ||||
| int resourceid_location = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_RESOURCE_ID); | int resourceid_location = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_RESOURCE_ID); | ||||
| /* TODO(@fclem): Will take the place of the above after the GPUShaderCreateInfo port. */ | /* TODO(@fclem): Will take the place of the above after the GPUShaderCreateInfo port. */ | ||||
| if (view_ubo_location == -1) { | if (view_ubo_location == -1) { | ||||
| view_ubo_location = GPU_shader_get_builtin_block(shader, GPU_UNIFORM_BLOCK_DRW_VIEW); | view_ubo_location = GPU_shader_get_builtin_block(shader, GPU_UNIFORM_BLOCK_DRW_VIEW); | ||||
| } | } | ||||
| if (model_ubo_location == -1) { | if (model_ubo_location == -1) { | ||||
| model_ubo_location = GPU_shader_get_builtin_block(shader, GPU_UNIFORM_BLOCK_DRW_MODEL); | model_ubo_location = GPU_shader_get_builtin_block(shader, GPU_UNIFORM_BLOCK_DRW_MODEL); | ||||
| } | } | ||||
| if (info_ubo_location == -1) { | if (info_ubo_location == -1) { | ||||
| info_ubo_location = GPU_shader_get_builtin_block(shader, GPU_UNIFORM_BLOCK_DRW_INFOS); | info_ubo_location = GPU_shader_get_builtin_block(shader, GPU_UNIFORM_BLOCK_DRW_INFOS); | ||||
| } | } | ||||
| if (chunkid_location != -1) { | if (chunkid_location != -1) { | ||||
| drw_shgroup_uniform_create_ex( | drw_shgroup_uniform_create_ex( | ||||
| shgroup, chunkid_location, DRW_UNIFORM_RESOURCE_CHUNK, NULL, 0, 0, 1); | shgroup, chunkid_location, DRW_UNIFORM_RESOURCE_CHUNK, nullptr, GPU_SAMPLER_DEFAULT, 0, 1); | ||||
| } | } | ||||
| if (resourceid_location != -1) { | if (resourceid_location != -1) { | ||||
| drw_shgroup_uniform_create_ex( | drw_shgroup_uniform_create_ex( | ||||
| shgroup, resourceid_location, DRW_UNIFORM_RESOURCE_ID, NULL, 0, 0, 1); | shgroup, resourceid_location, DRW_UNIFORM_RESOURCE_ID, nullptr, GPU_SAMPLER_DEFAULT, 0, 1); | ||||
| } | } | ||||
| if (baseinst_location != -1) { | if (baseinst_location != -1) { | ||||
| drw_shgroup_uniform_create_ex( | drw_shgroup_uniform_create_ex( | ||||
| shgroup, baseinst_location, DRW_UNIFORM_BASE_INSTANCE, NULL, 0, 0, 1); | shgroup, baseinst_location, DRW_UNIFORM_BASE_INSTANCE, nullptr, GPU_SAMPLER_DEFAULT, 0, 1); | ||||
| } | } | ||||
| if (model_ubo_location != -1) { | if (model_ubo_location != -1) { | ||||
| drw_shgroup_uniform_create_ex( | drw_shgroup_uniform_create_ex( | ||||
| shgroup, model_ubo_location, DRW_UNIFORM_BLOCK_OBMATS, NULL, 0, 0, 1); | shgroup, model_ubo_location, DRW_UNIFORM_BLOCK_OBMATS, nullptr, GPU_SAMPLER_DEFAULT, 0, 1); | ||||
| } | } | ||||
| else { | else { | ||||
| /* NOTE: This is only here to support the old-hardware fallback where uniform buffers are still | /* NOTE: This is only here to support the old-hardware fallback where uniform buffers are still | ||||
| * too slow or buggy. */ | * too slow or buggy. */ | ||||
| int model = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODEL); | int model = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODEL); | ||||
| int modelinverse = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODEL_INV); | int modelinverse = GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODEL_INV); | ||||
| if (model != -1) { | if (model != -1) { | ||||
| drw_shgroup_uniform_create_ex(shgroup, model, DRW_UNIFORM_MODEL_MATRIX, NULL, 0, 0, 1); | drw_shgroup_uniform_create_ex( | ||||
| shgroup, model, DRW_UNIFORM_MODEL_MATRIX, nullptr, GPU_SAMPLER_DEFAULT, 0, 1); | |||||
| } | } | ||||
| if (modelinverse != -1) { | if (modelinverse != -1) { | ||||
| drw_shgroup_uniform_create_ex( | drw_shgroup_uniform_create_ex(shgroup, | ||||
| shgroup, modelinverse, DRW_UNIFORM_MODEL_MATRIX_INVERSE, NULL, 0, 0, 1); | modelinverse, | ||||
| DRW_UNIFORM_MODEL_MATRIX_INVERSE, | |||||
| nullptr, | |||||
| GPU_SAMPLER_DEFAULT, | |||||
| 0, | |||||
| 1); | |||||
| } | } | ||||
| } | } | ||||
| if (info_ubo_location != -1) { | if (info_ubo_location != -1) { | ||||
| drw_shgroup_uniform_create_ex( | drw_shgroup_uniform_create_ex( | ||||
| shgroup, info_ubo_location, DRW_UNIFORM_BLOCK_OBINFOS, NULL, 0, 0, 1); | shgroup, info_ubo_location, DRW_UNIFORM_BLOCK_OBINFOS, nullptr, GPU_SAMPLER_DEFAULT, 0, 1); | ||||
| /* Abusing this loc to tell shgroup we need the obinfos. */ | /* Abusing this loc to tell shgroup we need the obinfos. */ | ||||
| shgroup->objectinfo = 1; | shgroup->objectinfo = 1; | ||||
| } | } | ||||
| else { | else { | ||||
| shgroup->objectinfo = 0; | shgroup->objectinfo = 0; | ||||
| } | } | ||||
| if (view_ubo_location != -1) { | if (view_ubo_location != -1) { | ||||
| drw_shgroup_uniform_create_ex( | drw_shgroup_uniform_create_ex( | ||||
| shgroup, view_ubo_location, DRW_UNIFORM_BLOCK, G_draw.view_ubo, 0, 0, 1); | shgroup, view_ubo_location, DRW_UNIFORM_BLOCK, G_draw.view_ubo, GPU_SAMPLER_DEFAULT, 0, 1); | ||||
| } | } | ||||
| #ifdef DEBUG | #ifdef DEBUG | ||||
| int debug_print_location = GPU_shader_get_builtin_ssbo(shader, GPU_STORAGE_BUFFER_DEBUG_PRINT); | int debug_print_location = GPU_shader_get_builtin_ssbo(shader, GPU_STORAGE_BUFFER_DEBUG_PRINT); | ||||
| if (debug_print_location != -1) { | if (debug_print_location != -1) { | ||||
| GPUStorageBuf *buf = drw_debug_gpu_print_buf_get(); | GPUStorageBuf *buf = drw_debug_gpu_print_buf_get(); | ||||
| drw_shgroup_uniform_create_ex( | drw_shgroup_uniform_create_ex( | ||||
| shgroup, debug_print_location, DRW_UNIFORM_STORAGE_BLOCK, buf, 0, 0, 1); | shgroup, debug_print_location, DRW_UNIFORM_STORAGE_BLOCK, buf, GPU_SAMPLER_DEFAULT, 0, 1); | ||||
| # ifndef DISABLE_DEBUG_SHADER_PRINT_BARRIER | # ifndef DISABLE_DEBUG_SHADER_PRINT_BARRIER | ||||
| /* Add a barrier to allow multiple shaders writing to the same buffer. */ | /* Add a barrier to allow multiple shaders writing to the same buffer. */ | ||||
| DRW_shgroup_barrier(shgroup, GPU_BARRIER_SHADER_STORAGE); | DRW_shgroup_barrier(shgroup, GPU_BARRIER_SHADER_STORAGE); | ||||
| # endif | # endif | ||||
| } | } | ||||
| int debug_draw_location = GPU_shader_get_builtin_ssbo(shader, GPU_STORAGE_BUFFER_DEBUG_VERTS); | int debug_draw_location = GPU_shader_get_builtin_ssbo(shader, GPU_STORAGE_BUFFER_DEBUG_VERTS); | ||||
| if (debug_draw_location != -1) { | if (debug_draw_location != -1) { | ||||
| GPUStorageBuf *buf = drw_debug_gpu_draw_buf_get(); | GPUStorageBuf *buf = drw_debug_gpu_draw_buf_get(); | ||||
| drw_shgroup_uniform_create_ex( | drw_shgroup_uniform_create_ex( | ||||
| shgroup, debug_draw_location, DRW_UNIFORM_STORAGE_BLOCK, buf, 0, 0, 1); | shgroup, debug_draw_location, DRW_UNIFORM_STORAGE_BLOCK, buf, GPU_SAMPLER_DEFAULT, 0, 1); | ||||
| /* NOTE(fclem): No barrier as ordering is not important. */ | /* NOTE(fclem): No barrier as ordering is not important. */ | ||||
| } | } | ||||
| #endif | #endif | ||||
| /* Not supported. */ | /* Not supported. */ | ||||
| BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODELVIEW_INV) == -1); | BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODELVIEW_INV) == -1); | ||||
| BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODELVIEW) == -1); | BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MODELVIEW) == -1); | ||||
| BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_NORMAL) == -1); | BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_NORMAL) == -1); | ||||
| BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_VIEW) == -1); | BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_VIEW) == -1); | ||||
| BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_VIEW_INV) == -1); | BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_VIEW_INV) == -1); | ||||
| BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_VIEWPROJECTION) == -1); | BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_VIEWPROJECTION) == -1); | ||||
| BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_VIEWPROJECTION_INV) == -1); | BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_VIEWPROJECTION_INV) == -1); | ||||
| BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_PROJECTION) == -1); | BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_PROJECTION) == -1); | ||||
| BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_PROJECTION_INV) == -1); | BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_PROJECTION_INV) == -1); | ||||
| BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_CLIPPLANES) == -1); | BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_CLIPPLANES) == -1); | ||||
| BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MVP) == -1); | BLI_assert(GPU_shader_get_builtin_uniform(shader, GPU_UNIFORM_MVP) == -1); | ||||
| } | } | ||||
| static DRWShadingGroup *drw_shgroup_create_ex(struct GPUShader *shader, DRWPass *pass) | static DRWShadingGroup *drw_shgroup_create_ex(struct GPUShader *shader, DRWPass *pass) | ||||
| { | { | ||||
| DRWShadingGroup *shgroup = BLI_memblock_alloc(DST.vmempool->shgroups); | DRWShadingGroup *shgroup = static_cast<DRWShadingGroup *>( | ||||
| BLI_memblock_alloc(DST.vmempool->shgroups)); | |||||
| BLI_LINKS_APPEND(&pass->shgroups, shgroup); | BLI_LINKS_APPEND(&pass->shgroups, shgroup); | ||||
| shgroup->shader = shader; | shgroup->shader = shader; | ||||
| shgroup->cmd.first = NULL; | shgroup->cmd.first = nullptr; | ||||
| shgroup->cmd.last = NULL; | shgroup->cmd.last = nullptr; | ||||
| shgroup->pass_handle = pass->handle; | shgroup->pass_handle = pass->handle; | ||||
| return shgroup; | return shgroup; | ||||
| } | } | ||||
| static DRWShadingGroup *drw_shgroup_material_create_ex(GPUPass *gpupass, DRWPass *pass) | static DRWShadingGroup *drw_shgroup_material_create_ex(GPUPass *gpupass, DRWPass *pass) | ||||
| { | { | ||||
| if (!gpupass) { | if (!gpupass) { | ||||
| /* Shader compilation error */ | /* Shader compilation error */ | ||||
| return NULL; | return nullptr; | ||||
| } | } | ||||
| GPUShader *sh = GPU_pass_shader_get(gpupass); | GPUShader *sh = GPU_pass_shader_get(gpupass); | ||||
| if (!sh) { | if (!sh) { | ||||
| /* Shader not yet compiled */ | /* Shader not yet compiled */ | ||||
| return NULL; | return nullptr; | ||||
| } | } | ||||
| DRWShadingGroup *grp = drw_shgroup_create_ex(sh, pass); | DRWShadingGroup *grp = drw_shgroup_create_ex(sh, pass); | ||||
| return grp; | return grp; | ||||
| } | } | ||||
| static void drw_shgroup_material_texture(DRWShadingGroup *grp, | static void drw_shgroup_material_texture(DRWShadingGroup *grp, | ||||
| GPUTexture *gputex, | GPUTexture *gputex, | ||||
| const char *name, | const char *name, | ||||
| eGPUSamplerState state) | eGPUSamplerState state) | ||||
| { | { | ||||
| DRW_shgroup_uniform_texture_ex(grp, name, gputex, state); | DRW_shgroup_uniform_texture_ex(grp, name, gputex, state); | ||||
| GPUTexture **gputex_ref = BLI_memblock_alloc(DST.vmempool->images); | GPUTexture **gputex_ref = static_cast<GPUTexture **>(BLI_memblock_alloc(DST.vmempool->images)); | ||||
| *gputex_ref = gputex; | *gputex_ref = gputex; | ||||
| GPU_texture_ref(gputex); | GPU_texture_ref(gputex); | ||||
| } | } | ||||
| void DRW_shgroup_add_material_resources(DRWShadingGroup *grp, struct GPUMaterial *material) | void DRW_shgroup_add_material_resources(DRWShadingGroup *grp, struct GPUMaterial *material) | ||||
| { | { | ||||
| ListBase textures = GPU_material_textures(material); | ListBase textures = GPU_material_textures(material); | ||||
| /* Bind all textures needed by the material. */ | /* Bind all textures needed by the material. */ | ||||
| LISTBASE_FOREACH (GPUMaterialTexture *, tex, &textures) { | LISTBASE_FOREACH (GPUMaterialTexture *, tex, &textures) { | ||||
| if (tex->ima) { | if (tex->ima) { | ||||
| /* Image */ | /* Image */ | ||||
| GPUTexture *gputex; | GPUTexture *gputex; | ||||
| ImageUser *iuser = tex->iuser_available ? &tex->iuser : NULL; | ImageUser *iuser = tex->iuser_available ? &tex->iuser : nullptr; | ||||
| if (tex->tiled_mapping_name[0]) { | if (tex->tiled_mapping_name[0]) { | ||||
| gputex = BKE_image_get_gpu_tiles(tex->ima, iuser, NULL); | gputex = BKE_image_get_gpu_tiles(tex->ima, iuser, nullptr); | ||||
| drw_shgroup_material_texture(grp, gputex, tex->sampler_name, tex->sampler_state); | drw_shgroup_material_texture( | ||||
| gputex = BKE_image_get_gpu_tilemap(tex->ima, iuser, NULL); | grp, gputex, tex->sampler_name, eGPUSamplerState(tex->sampler_state)); | ||||
| drw_shgroup_material_texture(grp, gputex, tex->tiled_mapping_name, tex->sampler_state); | gputex = BKE_image_get_gpu_tilemap(tex->ima, iuser, nullptr); | ||||
| drw_shgroup_material_texture( | |||||
| grp, gputex, tex->tiled_mapping_name, eGPUSamplerState(tex->sampler_state)); | |||||
| } | } | ||||
| else { | else { | ||||
| gputex = BKE_image_get_gpu_texture(tex->ima, iuser, NULL); | gputex = BKE_image_get_gpu_texture(tex->ima, iuser, nullptr); | ||||
| drw_shgroup_material_texture(grp, gputex, tex->sampler_name, tex->sampler_state); | drw_shgroup_material_texture( | ||||
| grp, gputex, tex->sampler_name, eGPUSamplerState(tex->sampler_state)); | |||||
| } | } | ||||
| } | } | ||||
| else if (tex->colorband) { | else if (tex->colorband) { | ||||
| /* Color Ramp */ | /* Color Ramp */ | ||||
| DRW_shgroup_uniform_texture(grp, tex->sampler_name, *tex->colorband); | DRW_shgroup_uniform_texture(grp, tex->sampler_name, *tex->colorband); | ||||
| } | } | ||||
| else if (tex->sky) { | else if (tex->sky) { | ||||
| /* Sky */ | /* Sky */ | ||||
| DRW_shgroup_uniform_texture_ex(grp, tex->sampler_name, *tex->sky, tex->sampler_state); | DRW_shgroup_uniform_texture_ex( | ||||
| grp, tex->sampler_name, *tex->sky, eGPUSamplerState(tex->sampler_state)); | |||||
| } | } | ||||
| } | } | ||||
| GPUUniformBuf *ubo = GPU_material_uniform_buffer_get(material); | GPUUniformBuf *ubo = GPU_material_uniform_buffer_get(material); | ||||
| if (ubo != NULL) { | if (ubo != nullptr) { | ||||
| DRW_shgroup_uniform_block(grp, GPU_UBO_BLOCK_NAME, ubo); | DRW_shgroup_uniform_block(grp, GPU_UBO_BLOCK_NAME, ubo); | ||||
| } | } | ||||
| const GPUUniformAttrList *uattrs = GPU_material_uniform_attributes(material); | const GPUUniformAttrList *uattrs = GPU_material_uniform_attributes(material); | ||||
| if (uattrs != NULL) { | if (uattrs != nullptr) { | ||||
| int loc = GPU_shader_get_uniform_block_binding(grp->shader, GPU_ATTRIBUTE_UBO_BLOCK_NAME); | int loc = GPU_shader_get_uniform_block_binding(grp->shader, GPU_ATTRIBUTE_UBO_BLOCK_NAME); | ||||
| drw_shgroup_uniform_create_ex(grp, loc, DRW_UNIFORM_BLOCK_OBATTRS, uattrs, 0, 0, 1); | drw_shgroup_uniform_create_ex( | ||||
| grp, loc, DRW_UNIFORM_BLOCK_OBATTRS, uattrs, GPU_SAMPLER_DEFAULT, 0, 1); | |||||
| grp->uniform_attrs = uattrs; | grp->uniform_attrs = uattrs; | ||||
| } | } | ||||
| } | } | ||||
| GPUVertFormat *DRW_shgroup_instance_format_array(const DRWInstanceAttrFormat attrs[], | GPUVertFormat *DRW_shgroup_instance_format_array(const DRWInstanceAttrFormat attrs[], | ||||
| int arraysize) | int arraysize) | ||||
| { | { | ||||
| GPUVertFormat *format = MEM_callocN(sizeof(GPUVertFormat), "GPUVertFormat"); | GPUVertFormat *format = MEM_cnew<GPUVertFormat>(__func__); | ||||
| for (int i = 0; i < arraysize; i++) { | for (int i = 0; i < arraysize; i++) { | ||||
| GPU_vertformat_attr_add(format, | GPU_vertformat_attr_add(format, | ||||
| attrs[i].name, | attrs[i].name, | ||||
| (attrs[i].type == DRW_ATTR_INT) ? GPU_COMP_I32 : GPU_COMP_F32, | (attrs[i].type == DRW_ATTR_INT) ? GPU_COMP_I32 : GPU_COMP_F32, | ||||
| attrs[i].components, | attrs[i].components, | ||||
| (attrs[i].type == DRW_ATTR_INT) ? GPU_FETCH_INT : GPU_FETCH_FLOAT); | (attrs[i].type == DRW_ATTR_INT) ? GPU_FETCH_INT : GPU_FETCH_FLOAT); | ||||
| } | } | ||||
| DRWShadingGroup *DRW_shgroup_create(struct GPUShader *shader, DRWPass *pass) | DRWShadingGroup *DRW_shgroup_create(struct GPUShader *shader, DRWPass *pass) | ||||
| drw_shgroup_init(shgroup, shader); | drw_shgroup_init(shgroup, shader); | ||||
| return shgroup; | return shgroup; | ||||
| } | } | ||||
| DRWShadingGroup *DRW_shgroup_transform_feedback_create(struct GPUShader *shader, | DRWShadingGroup *DRW_shgroup_transform_feedback_create(struct GPUShader *shader, | ||||
| DRWPass *pass, | DRWPass *pass, | ||||
| GPUVertBuf *tf_target) | GPUVertBuf *tf_target) | ||||
| { | { | ||||
| BLI_assert(tf_target != NULL); | BLI_assert(tf_target != nullptr); | ||||
| DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass); | DRWShadingGroup *shgroup = drw_shgroup_create_ex(shader, pass); | ||||
| drw_shgroup_init(shgroup, shader); | drw_shgroup_init(shgroup, shader); | ||||
| drw_shgroup_uniform_create_ex(shgroup, 0, DRW_UNIFORM_TFEEDBACK_TARGET, tf_target, 0, 0, 1); | drw_shgroup_uniform_create_ex( | ||||
| shgroup, 0, DRW_UNIFORM_TFEEDBACK_TARGET, tf_target, GPU_SAMPLER_DEFAULT, 0, 1); | |||||
| return shgroup; | return shgroup; | ||||
| } | } | ||||
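A sketch of the transform-feedback path; `tf_shader`, `pass`, `tf_target`, `geom` and `ob` are all illustrative, with `tf_shader` assumed to be built for transform feedback and `tf_target` a `GPUVertBuf` sized for the captured vertex stream:

DRWShadingGroup *tf_grp = DRW_shgroup_transform_feedback_create(tf_shader, pass, tf_target);
/* Feed it geometry like any other group; rasterizer output is unused,
 * the captured vertex stream lands in `tf_target`. */
DRW_shgroup_call(tf_grp, geom, ob);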
| void DRW_shgroup_state_enable(DRWShadingGroup *shgroup, DRWState state) | void DRW_shgroup_state_enable(DRWShadingGroup *shgroup, DRWState state) | ||||
| { | { | ||||
| drw_command_set_mutable_state(shgroup, state, 0x0); | drw_command_set_mutable_state(shgroup, state, DRW_STATE_NO_DRAW); | ||||
| } | } | ||||
| void DRW_shgroup_state_disable(DRWShadingGroup *shgroup, DRWState state) | void DRW_shgroup_state_disable(DRWShadingGroup *shgroup, DRWState state) | ||||
| { | { | ||||
| drw_command_set_mutable_state(shgroup, 0x0, state); | drw_command_set_mutable_state(shgroup, DRW_STATE_NO_DRAW, state); | ||||
| } | } | ||||
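Enable/disable record mutable-state commands, so they affect only the draw calls appended after them. A sketch (all geometry names illustrative):

DRW_shgroup_state_enable(shgrp, DRW_STATE_BLEND_ALPHA);
DRW_shgroup_call(shgrp, transparent_geom, ob); /* Blended. */

DRW_shgroup_state_disable(shgrp, DRW_STATE_BLEND_ALPHA);
DRW_shgroup_call(shgrp, opaque_geom, ob); /* Back to the pass's default state. */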
| void DRW_shgroup_stencil_set(DRWShadingGroup *shgroup, | void DRW_shgroup_stencil_set(DRWShadingGroup *shgroup, | ||||
| uint write_mask, | uint write_mask, | ||||
| uint reference, | uint reference, | ||||
| uint compare_mask) | uint compare_mask) | ||||
| { | { | ||||
| drw_command_set_stencil_mask(shgroup, write_mask, reference, compare_mask); | drw_command_set_stencil_mask(shgroup, write_mask, reference, compare_mask); | ||||
| for (int i = 0; i < chunk->command_used; i++) { | for (int i = 0; i < chunk->command_used; i++) { | ||||
| } | } | ||||
| } | } | ||||
| } | } | ||||
| return true; | return true; | ||||
| } | } | ||||
| DRWShadingGroup *DRW_shgroup_create_sub(DRWShadingGroup *shgroup) | DRWShadingGroup *DRW_shgroup_create_sub(DRWShadingGroup *shgroup) | ||||
| { | { | ||||
| DRWShadingGroup *shgroup_new = BLI_memblock_alloc(DST.vmempool->shgroups); | DRWShadingGroup *shgroup_new = static_cast<DRWShadingGroup *>( | ||||
| BLI_memblock_alloc(DST.vmempool->shgroups)); | |||||
| *shgroup_new = *shgroup; | *shgroup_new = *shgroup; | ||||
| drw_shgroup_init(shgroup_new, shgroup_new->shader); | drw_shgroup_init(shgroup_new, shgroup_new->shader); | ||||
| shgroup_new->cmd.first = NULL; | shgroup_new->cmd.first = nullptr; | ||||
| shgroup_new->cmd.last = NULL; | shgroup_new->cmd.last = nullptr; | ||||
| DRWPass *parent_pass = DRW_memblock_elem_from_handle(DST.vmempool->passes, | DRWPass *parent_pass = static_cast<DRWPass *>( | ||||
| &shgroup->pass_handle); | DRW_memblock_elem_from_handle(DST.vmempool->passes, &shgroup->pass_handle)); | ||||
| BLI_LINKS_INSERT_AFTER(&parent_pass->shgroups, shgroup, shgroup_new); | BLI_LINKS_INSERT_AFTER(&parent_pass->shgroups, shgroup, shgroup_new); | ||||
| return shgroup_new; | return shgroup_new; | ||||
| } | } | ||||
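The sub-group pattern used by the sculpt debug overlay above, as a standalone sketch: inherit the parent group's shader, state and uniforms, then override a single uniform for a subset of calls (`chunk_geom` and the tint value are illustrative; the uniform name is the one used earlier in this file):

DRWShadingGroup *sub = DRW_shgroup_create_sub(shgrp);
const float tint[3] = {1.0f, 0.25f, 0.25f};
DRW_shgroup_uniform_vec3_copy(sub, "materialDiffuseColor", tint);
DRW_shgroup_call(sub, chunk_geom, ob);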
| /** \} */ | /** \} */ | ||||
| else { | else { | ||||
| bsphere->radius = len_v3v3(bsphere->center, farpoint); | bsphere->radius = len_v3v3(bsphere->center, farpoint); | ||||
| } | } | ||||
| } | } | ||||
| static void draw_view_matrix_state_update(ViewInfos *storage, | static void draw_view_matrix_state_update(ViewInfos *storage, | ||||
| const float viewmat[4][4], | const float viewmat[4][4], | ||||
| const float winmat[4][4]) | const float winmat[4][4]) | ||||
| { | { | ||||
| copy_m4_m4(storage->viewmat, viewmat); | copy_m4_m4(storage->viewmat.values, viewmat); | ||||
| invert_m4_m4(storage->viewinv, storage->viewmat); | invert_m4_m4(storage->viewinv.values, storage->viewmat.values); | ||||
| copy_m4_m4(storage->winmat, winmat); | copy_m4_m4(storage->winmat.values, winmat); | ||||
| invert_m4_m4(storage->wininv, storage->winmat); | invert_m4_m4(storage->wininv.values, storage->winmat.values); | ||||
| mul_m4_m4m4(storage->persmat, winmat, viewmat); | mul_m4_m4m4(storage->persmat.values, winmat, viewmat); | ||||
| invert_m4_m4(storage->persinv, storage->persmat); | invert_m4_m4(storage->persinv.values, storage->persmat.values); | ||||
| const bool is_persp = (winmat[3][3] == 0.0f); | const bool is_persp = (winmat[3][3] == 0.0f); | ||||
| /* Near clip distance. */ | /* Near clip distance. */ | ||||
| storage->viewvecs[0][3] = (is_persp) ? -winmat[3][2] / (winmat[2][2] - 1.0f) : | storage->viewvecs[0][3] = (is_persp) ? -winmat[3][2] / (winmat[2][2] - 1.0f) : | ||||
| -(winmat[3][2] + 1.0f) / winmat[2][2]; | -(winmat[3][2] + 1.0f) / winmat[2][2]; | ||||
| /* Far clip distance. */ | /* Far clip distance. */ | ||||
| storage->viewvecs[1][3] = (is_persp) ? -winmat[3][2] / (winmat[2][2] + 1.0f) : | storage->viewvecs[1][3] = (is_persp) ? -winmat[3][2] / (winmat[2][2] + 1.0f) : | ||||
| -(winmat[3][2] - 1.0f) / winmat[2][2]; | -(winmat[3][2] - 1.0f) / winmat[2][2]; | ||||
| /* view vectors for the corners of the view frustum. | /* view vectors for the corners of the view frustum. | ||||
| * Can be used to recreate the world space position easily */ | * Can be used to recreate the world space position easily */ | ||||
| float view_vecs[4][3] = { | float view_vecs[4][3] = { | ||||
| {-1.0f, -1.0f, -1.0f}, | {-1.0f, -1.0f, -1.0f}, | ||||
| {1.0f, -1.0f, -1.0f}, | {1.0f, -1.0f, -1.0f}, | ||||
| {-1.0f, 1.0f, -1.0f}, | {-1.0f, 1.0f, -1.0f}, | ||||
| {-1.0f, -1.0f, 1.0f}, | {-1.0f, -1.0f, 1.0f}, | ||||
| }; | }; | ||||
| /* convert the view vectors to view space */ | /* convert the view vectors to view space */ | ||||
| for (int i = 0; i < 4; i++) { | for (int i = 0; i < 4; i++) { | ||||
| mul_project_m4_v3(storage->wininv, view_vecs[i]); | mul_project_m4_v3(storage->wininv.values, view_vecs[i]); | ||||
| /* normalized trick see: | /* normalized trick see: | ||||
| * http://www.derschmale.com/2014/01/26/reconstructing-positions-from-the-depth-buffer */ | * http://www.derschmale.com/2014/01/26/reconstructing-positions-from-the-depth-buffer */ | ||||
| if (is_persp) { | if (is_persp) { | ||||
| /* Divide XY by Z. */ | /* Divide XY by Z. */ | ||||
| mul_v2_fl(view_vecs[i], 1.0f / view_vecs[i][2]); | mul_v2_fl(view_vecs[i], 1.0f / view_vecs[i][2]); | ||||
| } | } | ||||
| } | } | ||||
| /** | /** | ||||
| * If ortho : view_vecs[0] is the near-bottom-left corner of the frustum and | * - When orthographic: | ||||
| * view_vecs[1] is the vector going from the near-bottom-left corner to | * `view_vecs[0]` is the near-bottom-left corner of the frustum and | ||||
| * `view_vecs[1]` is the vector going from the near-bottom-left corner to | |||||
| * the far-top-right corner. | * the far-top-right corner. | ||||
| * If Persp : view_vecs[0].xy and view_vecs[1].xy are respectively the bottom-left corner | * - When perspective: | ||||
| * when Z = 1, and top-left corner if Z = 1. | * `view_vecs[0].xy` and `view_vecs[1].xy` are respectively the bottom-left corner | ||||
| * view_vecs[0].z the near clip distance and view_vecs[1].z is the (signed) | * when Z = 1, and top-left corner if `Z = 1`. | ||||
| * `view_vecs[0].z` the near clip distance and `view_vecs[1].z` is the (signed) | |||||
| * distance from the near plane to the far clip plane. | * distance from the near plane to the far clip plane. | ||||
| */ | */ | ||||
| copy_v3_v3(storage->viewvecs[0], view_vecs[0]); | copy_v3_v3(storage->viewvecs[0], view_vecs[0]); | ||||
| /* we need to store the differences */ | /* we need to store the differences */ | ||||
| storage->viewvecs[1][0] = view_vecs[1][0] - view_vecs[0][0]; | storage->viewvecs[1][0] = view_vecs[1][0] - view_vecs[0][0]; | ||||
| storage->viewvecs[1][1] = view_vecs[2][1] - view_vecs[0][1]; | storage->viewvecs[1][1] = view_vecs[2][1] - view_vecs[0][1]; | ||||
| storage->viewvecs[1][2] = view_vecs[3][2] - view_vecs[0][2]; | storage->viewvecs[1][2] = view_vecs[3][2] - view_vecs[0][2]; | ||||
| } | } | ||||
| DRWView *DRW_view_create(const float viewmat[4][4], | DRWView *DRW_view_create(const float viewmat[4][4], | ||||
| const float winmat[4][4], | const float winmat[4][4], | ||||
| const float (*culling_viewmat)[4], | const float (*culling_viewmat)[4], | ||||
| const float (*culling_winmat)[4], | const float (*culling_winmat)[4], | ||||
| DRWCallVisibilityFn *visibility_fn) | DRWCallVisibilityFn *visibility_fn) | ||||
| { | { | ||||
| DRWView *view = BLI_memblock_alloc(DST.vmempool->views); | DRWView *view = static_cast<DRWView *>(BLI_memblock_alloc(DST.vmempool->views)); | ||||
| if (DST.primary_view_num < MAX_CULLED_VIEWS) { | if (DST.primary_view_num < MAX_CULLED_VIEWS) { | ||||
| view->culling_mask = 1u << DST.primary_view_num++; | view->culling_mask = 1u << DST.primary_view_num++; | ||||
| } | } | ||||
| else { | else { | ||||
| BLI_assert(0); | BLI_assert(0); | ||||
| view->culling_mask = 0u; | view->culling_mask = 0u; | ||||
| } | } | ||||
| view->clip_planes_len = 0; | view->clip_planes_len = 0; | ||||
| view->visibility_fn = visibility_fn; | view->visibility_fn = visibility_fn; | ||||
| view->parent = NULL; | view->parent = nullptr; | ||||
| copy_v4_fl4(view->storage.viewcamtexcofac, 1.0f, 1.0f, 0.0f, 0.0f); | copy_v4_fl4(view->storage.viewcamtexcofac, 1.0f, 1.0f, 0.0f, 0.0f); | ||||
| if (DST.draw_ctx.evil_C && DST.draw_ctx.region) { | if (DST.draw_ctx.evil_C && DST.draw_ctx.region) { | ||||
| int region_origin[2] = {DST.draw_ctx.region->winrct.xmin, DST.draw_ctx.region->winrct.ymin}; | int region_origin[2] = {DST.draw_ctx.region->winrct.xmin, DST.draw_ctx.region->winrct.ymin}; | ||||
| struct wmWindow *win = CTX_wm_window(DST.draw_ctx.evil_C); | struct wmWindow *win = CTX_wm_window(DST.draw_ctx.evil_C); | ||||
| wm_cursor_position_get(win, &view->storage.mouse_pixel[0], &view->storage.mouse_pixel[1]); | wm_cursor_position_get(win, &view->storage.mouse_pixel[0], &view->storage.mouse_pixel[1]); | ||||
| sub_v2_v2v2_int(view->storage.mouse_pixel, view->storage.mouse_pixel, region_origin); | sub_v2_v2v2_int(view->storage.mouse_pixel, view->storage.mouse_pixel, region_origin); | ||||
| } | } | ||||
| DRW_view_update(view, viewmat, winmat, culling_viewmat, culling_winmat); | DRW_view_update(view, viewmat, winmat, culling_viewmat, culling_winmat); | ||||
| return view; | return view; | ||||
| } | } | ||||
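Per-frame setup sketch; `viewmat` and `winmat` are assumed to be filled from the engine's camera before this runs:

DRWView *view = DRW_view_create(viewmat, winmat, nullptr, nullptr, nullptr);
DRW_view_default_set(view); /* Once per frame, before recording passes. */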
| DRWView *DRW_view_create_sub(const DRWView *parent_view, | DRWView *DRW_view_create_sub(const DRWView *parent_view, | ||||
| const float viewmat[4][4], | const float viewmat[4][4], | ||||
| const float winmat[4][4]) | const float winmat[4][4]) | ||||
| { | { | ||||
| /* Search original parent. */ | /* Search original parent. */ | ||||
| const DRWView *ori_view = parent_view; | const DRWView *ori_view = parent_view; | ||||
| while (ori_view->parent != NULL) { | while (ori_view->parent != nullptr) { | ||||
| ori_view = ori_view->parent; | ori_view = ori_view->parent; | ||||
| } | } | ||||
| DRWView *view = BLI_memblock_alloc(DST.vmempool->views); | DRWView *view = static_cast<DRWView *>(BLI_memblock_alloc(DST.vmempool->views)); | ||||
| /* Perform copy. */ | /* Perform copy. */ | ||||
| *view = *ori_view; | *view = *ori_view; | ||||
| view->parent = (DRWView *)ori_view; | view->parent = (DRWView *)ori_view; | ||||
| DRW_view_update_sub(view, viewmat, winmat); | DRW_view_update_sub(view, viewmat, winmat); | ||||
| return view; | return view; | ||||
| } | } | ||||
| /* DRWView Update: | /* DRWView Update: | ||||
| * This is meant to be done on existing views when rendering in a loop and there is no | * This is meant to be done on existing views when rendering in a loop and there is no | ||||
| * need to allocate more DRWViews. */ | * need to allocate more DRWViews. */ | ||||
| void DRW_view_update_sub(DRWView *view, const float viewmat[4][4], const float winmat[4][4]) | void DRW_view_update_sub(DRWView *view, const float viewmat[4][4], const float winmat[4][4]) | ||||
| { | { | ||||
| BLI_assert(view->parent != NULL); | BLI_assert(view->parent != nullptr); | ||||
| view->is_dirty = true; | view->is_dirty = true; | ||||
| view->is_inverted = (is_negative_m4(viewmat) == is_negative_m4(winmat)); | view->is_inverted = (is_negative_m4(viewmat) == is_negative_m4(winmat)); | ||||
| draw_view_matrix_state_update(&view->storage, viewmat, winmat); | draw_view_matrix_state_update(&view->storage, viewmat, winmat); | ||||
| } | } | ||||
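This is the loop the comment above describes, e.g. cube-map style rendering that reuses one sub-view for all six faces; `main_view`, `face_viewmat`, `face_winmat` and `pass` are illustrative and assumed filled elsewhere:

DRWView *face_view = DRW_view_create_sub(main_view, face_viewmat[0], face_winmat);
for (int i = 0; i < 6; i++) {
  DRW_view_update_sub(face_view, face_viewmat[i], face_winmat);
  DRW_view_set_active(face_view);
  DRW_draw_pass(pass);
}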
| void DRW_view_update(DRWView *view, | void DRW_view_update(DRWView *view, | ||||
| const float viewmat[4][4], | const float viewmat[4][4], | ||||
| const float winmat[4][4], | const float winmat[4][4], | ||||
| const float (*culling_viewmat)[4], | const float (*culling_viewmat)[4], | ||||
| const float (*culling_winmat)[4]) | const float (*culling_winmat)[4]) | ||||
| { | { | ||||
| /* DO NOT UPDATE THE DEFAULT VIEW. | /* DO NOT UPDATE THE DEFAULT VIEW. | ||||
| * Create sub-views instead, or a copy. */ | * Create sub-views instead, or a copy. */ | ||||
| BLI_assert(view != DST.view_default); | BLI_assert(view != DST.view_default); | ||||
| BLI_assert(view->parent == NULL); | BLI_assert(view->parent == nullptr); | ||||
| view->is_dirty = true; | view->is_dirty = true; | ||||
| view->is_inverted = (is_negative_m4(viewmat) == is_negative_m4(winmat)); | view->is_inverted = (is_negative_m4(viewmat) == is_negative_m4(winmat)); | ||||
| draw_view_matrix_state_update(&view->storage, viewmat, winmat); | draw_view_matrix_state_update(&view->storage, viewmat, winmat); | ||||
| /* Prepare frustum culling. */ | /* Prepare frustum culling. */ | ||||
| #endif | #endif | ||||
| float wininv[4][4]; | float wininv[4][4]; | ||||
| if (culling_winmat) { | if (culling_winmat) { | ||||
| winmat = culling_winmat; | winmat = culling_winmat; | ||||
| invert_m4_m4(wininv, winmat); | invert_m4_m4(wininv, winmat); | ||||
| } | } | ||||
| else { | else { | ||||
| copy_m4_m4(wininv, view->storage.wininv); | copy_m4_m4(wininv, view->storage.wininv.values); | ||||
| } | } | ||||
| float viewinv[4][4]; | float viewinv[4][4]; | ||||
| if (culling_viewmat) { | if (culling_viewmat) { | ||||
| viewmat = culling_viewmat; | viewmat = culling_viewmat; | ||||
| invert_m4_m4(viewinv, viewmat); | invert_m4_m4(viewinv, viewmat); | ||||
| } | } | ||||
| else { | else { | ||||
| copy_m4_m4(viewinv, view->storage.viewinv); | copy_m4_m4(viewinv, view->storage.viewinv.values); | ||||
| } | } | ||||
| draw_frustum_boundbox_calc(viewinv, winmat, &view->frustum_corners); | draw_frustum_boundbox_calc(viewinv, winmat, &view->frustum_corners); | ||||
| draw_frustum_culling_planes_calc(view->storage.persmat, view->frustum_planes); | draw_frustum_culling_planes_calc(view->storage.persmat.values, view->frustum_planes); | ||||
| draw_frustum_bound_sphere_calc( | draw_frustum_bound_sphere_calc( | ||||
| &view->frustum_corners, viewinv, winmat, wininv, &view->frustum_bsphere); | &view->frustum_corners, viewinv, winmat, wininv, &view->frustum_bsphere); | ||||
| /* TODO(fclem): Deduplicate. */ | /* TODO(fclem): Deduplicate. */ | ||||
| for (int i = 0; i < 8; i++) { | for (int i = 0; i < 8; i++) { | ||||
| copy_v3_v3(view->storage.frustum_corners[i], view->frustum_corners.vec[i]); | copy_v3_v3(view->storage.frustum_corners[i], view->frustum_corners.vec[i]); | ||||
| } | } | ||||
| for (int i = 0; i < 6; i++) { | for (int i = 0; i < 6; i++) { | ||||
| Show All 11 Lines | |||||
| const DRWView *DRW_view_default_get(void) | const DRWView *DRW_view_default_get(void) | ||||
| { | { | ||||
| return DST.view_default; | return DST.view_default; | ||||
| } | } | ||||
| void DRW_view_reset(void) | void DRW_view_reset(void) | ||||
| { | { | ||||
| DST.view_default = NULL; | DST.view_default = nullptr; | ||||
| DST.view_active = NULL; | DST.view_active = nullptr; | ||||
| DST.view_previous = NULL; | DST.view_previous = nullptr; | ||||
| } | } | ||||
| void DRW_view_default_set(const DRWView *view) | void DRW_view_default_set(const DRWView *view) | ||||
| { | { | ||||
| BLI_assert(DST.view_default == NULL); | BLI_assert(DST.view_default == nullptr); | ||||
| DST.view_default = (DRWView *)view; | DST.view_default = (DRWView *)view; | ||||
| } | } | ||||
| void DRW_view_clip_planes_set(DRWView *view, float (*planes)[4], int plane_len) | void DRW_view_clip_planes_set(DRWView *view, float (*planes)[4], int plane_len) | ||||
| { | { | ||||
| BLI_assert(plane_len <= MAX_CLIP_PLANES); | BLI_assert(plane_len <= MAX_CLIP_PLANES); | ||||
| view->clip_planes_len = plane_len; | view->clip_planes_len = plane_len; | ||||
| if (plane_len > 0) { | if (plane_len > 0) { | ||||
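A hypothetical caller, to make the contract concrete: each plane is a world-space (normal.xyz, offset.w) vector, a point is kept when dot(p, plane.xyz) + plane.w >= 0, and at most MAX_CLIP_PLANES planes are accepted:

    float clip_planes[2][4] = {
        {0.0f, 0.0f, 1.0f, 0.0f},   /* Keep z >= 0. */
        {0.0f, 0.0f, -1.0f, 10.0f}, /* Keep z <= 10. */
    };
    DRW_view_clip_planes_set(view, clip_planes, 2);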
| { | { | ||||
| view = (view) ? view : DST.view_default; | view = (view) ? view : DST.view_default; | ||||
| return view->storage.winmat[3][3] == 0.0f; | return view->storage.winmat[3][3] == 0.0f; | ||||
| } | } | ||||
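The winmat[3][3] test works because, in column-major storage, element [3][3] is the constant term of the clip-space w component: a perspective projection produces w_clip = -z_view (constant term 0), while an orthographic one produces w_clip = 1. A small illustration using the BLI matrix constructors (frustum values arbitrary):

    float persp[4][4], ortho[4][4];
    perspective_m4(persp, -0.1f, 0.1f, -0.1f, 0.1f, 0.1f, 100.0f);
    orthographic_m4(ortho, -1.0f, 1.0f, -1.0f, 1.0f, 0.1f, 100.0f);
    BLI_assert(persp[3][3] == 0.0f); /* Perspective: w depends only on z. */
    BLI_assert(ortho[3][3] == 1.0f); /* Orthographic: w is constant. */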
| float DRW_view_near_distance_get(const DRWView *view) | float DRW_view_near_distance_get(const DRWView *view) | ||||
| { | { | ||||
| view = (view) ? view : DST.view_default; | view = (view) ? view : DST.view_default; | ||||
| const float(*projmat)[4] = view->storage.winmat; | const float4x4 &projmat = view->storage.winmat; | ||||
| if (DRW_view_is_persp_get(view)) { | if (DRW_view_is_persp_get(view)) { | ||||
| return -projmat[3][2] / (projmat[2][2] - 1.0f); | return -projmat[3][2] / (projmat[2][2] - 1.0f); | ||||
| } | } | ||||
| return -(projmat[3][2] + 1.0f) / projmat[2][2]; | return -(projmat[3][2] + 1.0f) / projmat[2][2]; | ||||
| } | } | ||||
| float DRW_view_far_distance_get(const DRWView *view) | float DRW_view_far_distance_get(const DRWView *view) | ||||
| { | { | ||||
| view = (view) ? view : DST.view_default; | view = (view) ? view : DST.view_default; | ||||
| const float(*projmat)[4] = view->storage.winmat; | const float4x4 &projmat = view->storage.winmat; | ||||
| if (DRW_view_is_persp_get(view)) { | if (DRW_view_is_persp_get(view)) { | ||||
| return -projmat[3][2] / (projmat[2][2] + 1.0f); | return -projmat[3][2] / (projmat[2][2] + 1.0f); | ||||
| } | } | ||||
| return -(projmat[3][2] - 1.0f) / projmat[2][2]; | return -(projmat[3][2] - 1.0f) / projmat[2][2]; | ||||
| } | } | ||||
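To see where these expressions come from: a GL-style perspective matrix has winmat[2][2] = -(f + n) / (f - n) and winmat[3][2] = -2 f n / (f - n) (column-major), so -winmat[3][2] / (winmat[2][2] - 1) simplifies to -n and the + 1 variant to -f; both getters therefore return the view-space Z of the clip plane, which is negative in front of the camera. A sketch that round-trips known clip distances (frustum bounds arbitrary):

    float winmat[4][4];
    const float n = 0.1f, f = 100.0f;
    perspective_m4(winmat, -0.1f, 0.1f, -0.1f, 0.1f, n, f);
    const float near_z = -winmat[3][2] / (winmat[2][2] - 1.0f);
    const float far_z = -winmat[3][2] / (winmat[2][2] + 1.0f);
    BLI_assert(compare_ff(near_z, -n, 1e-5f)); /* View-space Z of the near plane. */
    BLI_assert(compare_ff(far_z, -f, 1e-3f));  /* View-space Z of the far plane. */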
| void DRW_view_viewmat_get(const DRWView *view, float mat[4][4], bool inverse) | void DRW_view_viewmat_get(const DRWView *view, float mat[4][4], bool inverse) | ||||
| { | { | ||||
| view = (view) ? view : DST.view_default; | view = (view) ? view : DST.view_default; | ||||
| const ViewInfos *storage = &view->storage; | const ViewInfos *storage = &view->storage; | ||||
| copy_m4_m4(mat, (inverse) ? storage->viewinv : storage->viewmat); | copy_m4_m4(mat, (inverse) ? storage->viewinv.values : storage->viewmat.values); | ||||
| } | } | ||||
| void DRW_view_winmat_get(const DRWView *view, float mat[4][4], bool inverse) | void DRW_view_winmat_get(const DRWView *view, float mat[4][4], bool inverse) | ||||
| { | { | ||||
| view = (view) ? view : DST.view_default; | view = (view) ? view : DST.view_default; | ||||
| const ViewInfos *storage = &view->storage; | const ViewInfos *storage = &view->storage; | ||||
| copy_m4_m4(mat, (inverse) ? storage->wininv : storage->winmat); | copy_m4_m4(mat, (inverse) ? storage->wininv.values : storage->winmat.values); | ||||
| } | } | ||||
| void DRW_view_persmat_get(const DRWView *view, float mat[4][4], bool inverse) | void DRW_view_persmat_get(const DRWView *view, float mat[4][4], bool inverse) | ||||
| { | { | ||||
| view = (view) ? view : DST.view_default; | view = (view) ? view : DST.view_default; | ||||
| const ViewInfos *storage = &view->storage; | const ViewInfos *storage = &view->storage; | ||||
| copy_m4_m4(mat, (inverse) ? storage->persinv : storage->persmat); | copy_m4_m4(mat, (inverse) ? storage->persinv.values : storage->persmat.values); | ||||
| } | } | ||||
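The three getters differ only in which ViewInfos pair they copy from. A hypothetical caller, passing nullptr to fall back to the default view:

    float viewmat[4][4], winmat[4][4], persinv[4][4];
    DRW_view_viewmat_get(nullptr, viewmat, false); /* World -> view. */
    DRW_view_winmat_get(nullptr, winmat, false);   /* View -> clip. */
    DRW_view_persmat_get(nullptr, persinv, true);  /* Clip -> world (inverse). */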
| /** \} */ | /** \} */ | ||||
| /* -------------------------------------------------------------------- */ | /* -------------------------------------------------------------------- */ | ||||
| /** \name Passes (DRW_pass) | /** \name Passes (DRW_pass) | ||||
| * \{ */ | * \{ */ | ||||
| DRWPass *DRW_pass_create(const char *name, DRWState state) | DRWPass *DRW_pass_create(const char *name, DRWState state) | ||||
| { | { | ||||
| DRWPass *pass = BLI_memblock_alloc(DST.vmempool->passes); | DRWPass *pass = static_cast<DRWPass *>(BLI_memblock_alloc(DST.vmempool->passes)); | ||||
| pass->state = state | DRW_STATE_PROGRAM_POINT_SIZE; | pass->state = state | DRW_STATE_PROGRAM_POINT_SIZE; | ||||
| if (G.debug & G_DEBUG_GPU) { | if (G.debug & G_DEBUG_GPU) { | ||||
| BLI_strncpy(pass->name, name, MAX_PASS_NAME); | BLI_strncpy(pass->name, name, MAX_PASS_NAME); | ||||
| } | } | ||||
| pass->shgroups.first = NULL; | pass->shgroups.first = nullptr; | ||||
| pass->shgroups.last = NULL; | pass->shgroups.last = nullptr; | ||||
| pass->handle = DST.pass_handle; | pass->handle = DST.pass_handle; | ||||
| DRW_handle_increment(&DST.pass_handle); | DRW_handle_increment(&DST.pass_handle); | ||||
| pass->original = NULL; | pass->original = nullptr; | ||||
| pass->next = NULL; | pass->next = nullptr; | ||||
| return pass; | return pass; | ||||
| } | } | ||||
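Typical engine-side usage, to make the allocation contract visible: the pass lives in the per-viewport vmempool (callers never free it), and DRW_STATE_PROGRAM_POINT_SIZE is OR'ed in automatically. The pass name and state mask here are illustrative:

    DRWState state = DRW_STATE_WRITE_COLOR | DRW_STATE_WRITE_DEPTH |
                     DRW_STATE_DEPTH_LESS_EQUAL;
    DRWPass *opaque_ps = DRW_pass_create("Opaque Pass", state);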
| DRWPass *DRW_pass_create_instance(const char *name, DRWPass *original, DRWState state) | DRWPass *DRW_pass_create_instance(const char *name, DRWPass *original, DRWState state) | ||||
| { | { | ||||
| DRWPass *pass = DRW_pass_create(name, state); | DRWPass *pass = DRW_pass_create(name, state); | ||||
| pass->original = original; | pass->original = original; | ||||
| return pass; | return pass; | ||||
| } | } | ||||
| void DRW_pass_link(DRWPass *first, DRWPass *second) | void DRW_pass_link(DRWPass *first, DRWPass *second) | ||||
| { | { | ||||
| BLI_assert(first != second); | BLI_assert(first != second); | ||||
| BLI_assert(first->next == NULL); | BLI_assert(first->next == nullptr); | ||||
| first->next = second; | first->next = second; | ||||
| } | } | ||||
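Continuing the sketch above: linking chains passes so that drawing the first also draws the second, and the assert enforces that a pass is linked at most once:

    DRWPass *transp_ps = DRW_pass_create("Transparent Pass",
                                         DRW_STATE_WRITE_COLOR | DRW_STATE_BLEND_ALPHA);
    DRW_pass_link(opaque_ps, transp_ps); /* Drawing opaque_ps now also draws transp_ps. */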
| bool DRW_pass_is_empty(DRWPass *pass) | bool DRW_pass_is_empty(DRWPass *pass) | ||||
| { | { | ||||
| if (pass->original) { | if (pass->original) { | ||||
| return DRW_pass_is_empty(pass->original); | return DRW_pass_is_empty(pass->original); | ||||
| } | } | ||||
| #define SORT_IMPL_FUNC shgroup_sort_fn_r | #define SORT_IMPL_FUNC shgroup_sort_fn_r | ||||
| #include "../../blenlib/intern/list_sort_impl.h" | #include "../../blenlib/intern/list_sort_impl.h" | ||||
| #undef SORT_IMPL_FUNC | #undef SORT_IMPL_FUNC | ||||
| #undef SORT_IMPL_LINKTYPE | #undef SORT_IMPL_LINKTYPE | ||||
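The include above is BLI's textual sort template: list_sort_impl.h expands into a merge-sort specialized by the SORT_IMPL_* defines (the elided lines set SORT_IMPL_LINKTYPE to the shgroup type). A hypothetical second instantiation for another linked type, assuming it has a next pointer, would follow the same pattern:

    #define SORT_IMPL_LINKTYPE MyNodeType /* Hypothetical struct with a `next` member. */
    #define SORT_IMPL_FUNC my_node_sort_fn
    #include "../../blenlib/intern/list_sort_impl.h" /* Emits my_node_sort_fn(). */
    #undef SORT_IMPL_FUNC
    #undef SORT_IMPL_LINKTYPE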
| void DRW_pass_sort_shgroup_z(DRWPass *pass) | void DRW_pass_sort_shgroup_z(DRWPass *pass) | ||||
| { | { | ||||
| const float(*viewinv)[4] = DST.view_active->storage.viewinv; | const float4x4 &viewinv = DST.view_active->storage.viewinv; | ||||
| if (!(pass->shgroups.first && pass->shgroups.first->next)) { | if (!(pass->shgroups.first && pass->shgroups.first->next)) { | ||||
| /* Nothing to sort */ | /* Nothing to sort */ | ||||
| return; | return; | ||||
| } | } | ||||
| uint index = 0; | uint index = 0; | ||||
| DRWShadingGroup *shgroup = pass->shgroups.first; | DRWShadingGroup *shgroup = pass->shgroups.first; | ||||
| for (; cmd_chunk && handle == 0; cmd_chunk = cmd_chunk->next) { | for (; cmd_chunk && handle == 0; cmd_chunk = cmd_chunk->next) { | ||||
| } | } | ||||
| } | } | ||||
| /* To be sorted, a shgroup needs to have at least one draw command. */ | /* To be sorted, a shgroup needs to have at least one draw command. */ | ||||
| /* FIXME(fclem): In some cases, we can still have empty shading groups to sort. However, their | /* FIXME(fclem): In some cases, we can still have empty shading groups to sort. However, their | ||||
| * final order is not well defined. | * final order is not well defined. | ||||
| * (see T76730 & D7729). */ | * (see T76730 & D7729). */ | ||||
| // BLI_assert(handle != 0); | // BLI_assert(handle != 0); | ||||
| DRWObjectMatrix *obmats = DRW_memblock_elem_from_handle(DST.vmempool->obmats, &handle); | DRWObjectMatrix *obmats = static_cast<DRWObjectMatrix *>( | ||||
| DRW_memblock_elem_from_handle(DST.vmempool->obmats, &handle)); | |||||
| /* Compute distance to camera. */ | /* Compute distance to camera. */ | ||||
| float tmp[3]; | float tmp[3]; | ||||
| sub_v3_v3v3(tmp, viewinv[3], obmats->model[3]); | sub_v3_v3v3(tmp, viewinv[3], obmats->model[3]); | ||||
| shgroup->z_sorting.distance = dot_v3v3(viewinv[2], tmp); | shgroup->z_sorting.distance = dot_v3v3(viewinv[2], tmp); | ||||
| shgroup->z_sorting.original_index = index++; | shgroup->z_sorting.original_index = index++; | ||||
| } while ((shgroup = shgroup->next)); | } while ((shgroup = shgroup->next)); | ||||
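The distance computed in the loop above is a plain view-axis projection: viewinv[3] holds the camera position and viewinv[2] the camera's +Z axis in world space, so the dot product is the object's signed distance from the camera along the view direction (positive in front of the camera). A standalone sketch of the same metric, with a hypothetical helper name:

    static float shgroup_z_sort_key_sketch(const float viewinv[4][4],
                                           const float obmat[4][4])
    {
      float ob_to_cam[3];
      /* Vector from the object's location (translation column) to the camera. */
      sub_v3_v3v3(ob_to_cam, viewinv[3], obmat[3]);
      /* Project onto the camera's Z axis to get the view-space depth key. */
      return dot_v3v3(viewinv[2], ob_to_cam);
    }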