Changeset View
Changeset View
Standalone View
Standalone View
source/blender/gpu/intern/gpu_select_pick.c
- This file was added.
| /* | |||||
| * ***** BEGIN GPL LICENSE BLOCK ***** | |||||
| * | |||||
| * This program is free software; you can redistribute it and/or | |||||
| * modify it under the terms of the GNU General Public License | |||||
| * as published by the Free Software Foundation; either version 2 | |||||
| * of the License, or (at your option) any later version. | |||||
| * | |||||
| * This program is distributed in the hope that it will be useful, | |||||
| * but WITHOUT ANY WARRANTY; without even the implied warranty of | |||||
| * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |||||
| * GNU General Public License for more details. | |||||
| * | |||||
| * You should have received a copy of the GNU General Public License | |||||
| * along with this program; if not, write to the Free Software Foundation, | |||||
| * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | |||||
| * | |||||
| * The Original Code is Copyright (C) 2017 Blender Foundation. | |||||
| * All rights reserved. | |||||
| * | |||||
| * ***** END GPL LICENSE BLOCK ***** | |||||
| */ | |||||
| /** \file blender/gpu/intern/gpu_select_pick.c | |||||
| * \ingroup gpu | |||||
| * | |||||
| * Custom select code for picking small regions (not efficient for large regions). | |||||
| * `gpu_select_pick_*` API. | |||||
| */ | |||||
| #include <string.h> | |||||
| #include <stdlib.h> | |||||
| #include <float.h> | |||||
| #include "GPU_select.h" | |||||
| #include "GPU_extensions.h" | |||||
| #include "GPU_glew.h" | |||||
| #include "MEM_guardedalloc.h" | |||||
| #include "BLI_rect.h" | |||||
| #include "BLI_listbase.h" | |||||
| #include "BLI_utildefines.h" | |||||
| #include "gpu_select_private.h" | |||||
| /* #define DEBUG_PRINT */ | |||||
| /* Alloc number for depths */ | |||||
| #define ALLOC_DEPTHS 200 | |||||
| /* For looping over a sub-region of a rect, could be moved into 'rct.c' */ | |||||
typedef struct SubRectStride {
	unsigned int start;  /* index of the first element inside the larger buffer */
	unsigned int span;   /* number of contiguous elements to read per row */
	unsigned int skip;   /* number of elements to skip to reach the next row */
	unsigned int len;    /* number of rows (read 'span' elements, 'len' times) */
} SubRectStride;
| /** | |||||
| * Calculate values needed for looping over a sub-region (smaller buffer within a larger buffer). | |||||
| * | |||||
| * 'src' must be bigger than 'dst'. | |||||
| */ | |||||
| static void rect_subregion_stride_calc(const rcti *src, const rcti *dst, SubRectStride *r_sub) | |||||
| { | |||||
| const int src_x = BLI_rcti_size_x(src); | |||||
| const int src_y = BLI_rcti_size_y(src); | |||||
| const int dst_x = BLI_rcti_size_x(dst); | |||||
| const int dst_y = BLI_rcti_size_y(dst); | |||||
| const int x = dst->xmin - src->xmin; | |||||
| const int y = dst->ymin - src->ymin; | |||||
| BLI_assert(dst_x <= src_x && dst_y <= src_y); | |||||
| BLI_assert(x >= 0 && y >= 0); | |||||
| r_sub->start = (src_x * y) + x; | |||||
| r_sub->span = dst_x; | |||||
| r_sub->skip = src_x - dst_x; | |||||
| r_sub->len = dst_y; | |||||
| } | |||||
/* Stores the result of one glReadPixels depth pass, kept in a ListBase. */
typedef struct DepthBufCache {
	struct DepthBufCache *next, *prev;
	/* ID of the pass that produced this depth buffer (SELECT_ID_NONE when unset) */
	unsigned int id;
	/* C99 flexible array member (was 'buf[0]', a GNU extension),
	 * allocated with the struct, see depth_buf_malloc */
	float buf[];
} DepthBufCache;
| static DepthBufCache *depth_buf_malloc(unsigned int rect_len) | |||||
| { | |||||
| DepthBufCache *rect = MEM_mallocN(sizeof(DepthBufCache) + sizeof(float) * rect_len, __func__); | |||||
| rect->id = SELECT_ID_NONE; | |||||
| return rect; | |||||
| } | |||||
/* A single hit: an object id paired with the best (nearest) depth found for it. */
typedef struct DepthID {
	unsigned int id;
	float depth;
} DepthID;
| static int depth_id_cmp(const void *v1, const void *v2) | |||||
| { | |||||
| const DepthID *d1 = v1, *d2 = v2; | |||||
| if (d1->id < d2->id) { | |||||
| return -1; | |||||
| } | |||||
| else if (d1->id > d2->id) { | |||||
| return 1; | |||||
| } | |||||
| else { | |||||
| return 0; | |||||
| } | |||||
| } | |||||
| static int depth_cmp(const void *v1, const void *v2) | |||||
| { | |||||
| const DepthID *d1 = v1, *d2 = v2; | |||||
| if (d1->depth < d2->depth) { | |||||
| return -1; | |||||
| } | |||||
| else if (d1->depth > d2->depth) { | |||||
| return 1; | |||||
| } | |||||
| else { | |||||
| return 0; | |||||
| } | |||||
| } | |||||
/* depth sorting: all state used by the `gpu_select_pick_*` API */
typedef struct GPUPickState {
	/* cache on initialization */
	unsigned int (*buffer)[4];
	/* buffer size (stores number of integers, for actual size multiply by sizeof integer) */
	unsigned int bufsize;
	/* mode of operation (GPU_SELECT_PICK_SORT_ALL / GPU_SELECT_PICK_SORT_NEAREST) */
	char mode;

	/* OpenGL drawing state, only initialized & used while (is_cached == false),
	 * see gpu_select_pick_begin. */
	struct {
		/* accumulated depth buffer of all previous passes */
		DepthBufCache *rect_depth;
		/* scratch buffer, avoid allocs every time (the newly read depths) */
		DepthBufCache *rect_depth_test;
		/* Pass to glReadPixels (x, y, w, h) */
		int clip_readpixels[4];
		/* false until the first gpu_select_pick_load_id call */
		bool is_init;
		/* id of the pass currently being drawn */
		unsigned int prev_id;
	} gl;

	/* Data stored in 'cache' and 'gl' (region actually read from GL) */
	struct {
		rcti clip_rect;
		unsigned int rect_len;
	} src;

	/* Use when cached region is smaller (where src -> dst isn't 1:1) */
	struct {
		rcti clip_rect;
		unsigned int rect_len;
	} dst;

	/* true between cache_begin/cache_end calls */
	bool use_cache;
	/* true after the first begin/end pair while caching,
	 * later runs can then skip drawing (see gpu_select_pick_is_cached) */
	bool is_cached;

	struct {
		/* Strides for iterating over both source and destination buffers:
		 * src.clip_rect -> dst.clip_rect */
		SubRectStride sub_rect;
		/* list of DepthBufCache, one per depth pass */
		ListBase bufs;
	} cache;

	/* Picking methods (selected by 'mode') */
	union {
		/* GPU_SELECT_PICK_SORT_ALL */
		struct {
			DepthID *hits;
			unsigned int hits_len;
			unsigned int hits_len_alloc;
		} all;
		/* GPU_SELECT_PICK_SORT_NEAREST */
		struct {
			/* per-pixel id of the nearest hit, 'dst.rect_len' in length */
			unsigned int *rect_id;
		} nearest;
	};
} GPUPickState;
/* Single global pick state (the begin/end API operates on this implicitly). */
static GPUPickState g_pick_state = {0};
| void gpu_select_pick_begin( | |||||
| unsigned int (*buffer)[4], unsigned int bufsize, | |||||
| const rcti *input, char mode) | |||||
| { | |||||
| GPUPickState *ps = &g_pick_state; | |||||
| #ifdef DEBUG_PRINT | |||||
| printf("%s: mode=%d, use_cache=%d, is_cache=%d\n", __func__, mode, ps->use_cache, ps->is_cached); | |||||
| #endif | |||||
| ps->bufsize = bufsize; | |||||
| ps->buffer = buffer; | |||||
| ps->mode = mode; | |||||
| const unsigned int rect_len = BLI_rcti_size_x(input) * BLI_rcti_size_y(input); | |||||
| ps->dst.clip_rect = *input; | |||||
| ps->dst.rect_len = rect_len; | |||||
| /* Restrict OpenGL operations for when we don't have cache */ | |||||
| if (ps->is_cached == false) { | |||||
| float viewport[4]; | |||||
| ps->src.clip_rect = *input; | |||||
| ps->src.rect_len = rect_len; | |||||
| glGetFloatv(GL_SCISSOR_BOX, viewport); | |||||
| ps->gl.clip_readpixels[0] = viewport[0]; | |||||
| ps->gl.clip_readpixels[1] = viewport[1]; | |||||
| ps->gl.clip_readpixels[2] = BLI_rcti_size_x(&ps->src.clip_rect); | |||||
| ps->gl.clip_readpixels[3] = BLI_rcti_size_y(&ps->src.clip_rect); | |||||
| glViewport(UNPACK4(ps->gl.clip_readpixels)); | |||||
| glPushAttrib(GL_DEPTH_BUFFER_BIT | GL_VIEWPORT_BIT); | |||||
| /* disable writing to the framebuffer */ | |||||
| glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE); | |||||
| /* It's possible we don't want to clear depth buffer, | |||||
| * so existing elements are masked by current z-buffer. */ | |||||
| glClear(GL_DEPTH_BUFFER_BIT); | |||||
| /* scratch buffer (read new values here) */ | |||||
| ps->gl.rect_depth_test = depth_buf_malloc(rect_len); | |||||
| ps->gl.rect_depth = depth_buf_malloc(rect_len); | |||||
| /* set initial 'far' value */ | |||||
| #if 0 | |||||
| glReadPixels(UNPACK4(ps->gl.clip_readpixels), GL_DEPTH_COMPONENT, GL_FLOAT, ps->gl.rect_depth->buf); | |||||
| #else | |||||
| for (unsigned int i = 0; i < rect_len; i++) { | |||||
| ps->gl.rect_depth->buf[i] = 1.0; | |||||
| } | |||||
| #endif | |||||
| ps->gl.is_init = false; | |||||
| ps->gl.prev_id = 0; | |||||
| if (mode == GPU_SELECT_PICK_SORT_ALL) { | |||||
| glEnable(GL_DEPTH_TEST); | |||||
| glDepthMask(GL_TRUE); | |||||
| glDepthFunc(GL_ALWAYS); | |||||
| } | |||||
| else { | |||||
| glEnable(GL_DEPTH_TEST); | |||||
| glDepthMask(GL_TRUE); | |||||
| glDepthFunc(GL_LEQUAL); | |||||
| } | |||||
| } | |||||
| else { | |||||
| /* src.clip_rect -> dst.clip_rect */ | |||||
| rect_subregion_stride_calc(&ps->src.clip_rect, &ps->dst.clip_rect, &ps->cache.sub_rect); | |||||
| BLI_assert(ps->gl.rect_depth == NULL); | |||||
| BLI_assert(ps->gl.rect_depth_test == NULL); | |||||
| } | |||||
| if (mode == GPU_SELECT_PICK_SORT_ALL) { | |||||
| ps->all.hits = MEM_mallocN(sizeof(*ps->all.hits) * ALLOC_DEPTHS, __func__); | |||||
| ps->all.hits_len = 0; | |||||
| ps->all.hits_len_alloc = ALLOC_DEPTHS; | |||||
| } | |||||
| else { | |||||
| ps->nearest.rect_id = MEM_mallocN(sizeof(unsigned int) * ps->dst.rect_len, __func__); | |||||
| memset(ps->nearest.rect_id, 0xff, sizeof(unsigned int) * ps->dst.rect_len); | |||||
| } | |||||
| } | |||||
/**
 * Given 2x depth buffers which are known to differ - update the hit information,
 * used for both cached & uncached depth buffers.
 *
 * \param rect_depth: accumulated depths of all previous passes.
 * \param rect_depth_test: newly read depths, its 'id' identifies the pass that drew them.
 */
static void gpu_select_load_id_pass(const DepthBufCache *rect_depth, const DepthBufCache *rect_depth_test)
{
	GPUPickState *ps = &g_pick_state;
	/* id of the pass whose depths we are inspecting */
	const unsigned int prev_id = rect_depth_test->id;
	if (g_pick_state.mode == GPU_SELECT_PICK_SORT_ALL) {
		/* find the best depth for this pass and store in 'all.hits' */
		float depth_best = FLT_MAX;

		/* a changed pixel belongs to this pass, keep the nearest depth */
#define UPDATE_DEPTH_TEST() \
		if (*curr != *prev) { \
			if (depth_best > *curr) { \
				depth_best = *curr; \
			} \
		} ((void)0)

		if (ps->is_cached == false) {
			const float *prev = rect_depth->buf;
			const float *curr = rect_depth_test->buf;
			BLI_assert(ps->src.rect_len == ps->dst.rect_len);
			const unsigned int rect_len = ps->src.rect_len;
			for (unsigned int i = 0; i < rect_len; i++, curr++, prev++) {
				UPDATE_DEPTH_TEST();
			}
		}
		else {
			/* same as above but different rect sizes:
			 * walk only the dst sub-region of the cached src buffers */
			const float *prev = rect_depth->buf + ps->cache.sub_rect.start;
			const float *curr = rect_depth_test->buf + ps->cache.sub_rect.start;
			for (unsigned int i = 0; i < ps->cache.sub_rect.len; i++) {
				const float *prev_final = prev + ps->cache.sub_rect.span;
				for (; prev < prev_final; prev++, curr++) {
					UPDATE_DEPTH_TEST();
				}
				prev += ps->cache.sub_rect.skip;
				curr += ps->cache.sub_rect.skip;
			}
		}

#undef UPDATE_DEPTH_TEST

		/* ensure enough space */
		if (UNLIKELY(ps->all.hits_len == ps->all.hits_len_alloc)) {
			ps->all.hits_len_alloc += ALLOC_DEPTHS;
			ps->all.hits = MEM_reallocN(ps->all.hits, ps->all.hits_len_alloc * sizeof(*ps->all.hits));
		}
		DepthID *d = &ps->all.hits[ps->all.hits_len++];
		d->id = prev_id;
		d->depth = depth_best;
	}
	else {
		/* keep track each pixels ID in 'nearest.rect_id' */
		if (prev_id != SELECT_ID_NONE) {
			unsigned int *id_ptr = ps->nearest.rect_id;

			/* a changed pixel was drawn by this pass: record its id
			 * (depth-test with GL_LEQUAL already keeps only the nearest) */
#define UPDATE_ID_TEST() \
			if (*curr != *prev) { \
				*id_ptr = prev_id; \
			} ((void)0)

			if (ps->is_cached == false) {
				const float *prev = rect_depth->buf;
				const float *curr = rect_depth_test->buf;
				BLI_assert(ps->src.rect_len == ps->dst.rect_len);
				const unsigned int rect_len = ps->src.rect_len;
				for (unsigned int i = 0; i < rect_len; i++, curr++, prev++, id_ptr++) {
					UPDATE_ID_TEST();
				}
			}
			else {
				/* same as above but different rect sizes:
				 * walk only the dst sub-region of the cached src buffers */
				const float *prev = rect_depth->buf + ps->cache.sub_rect.start;
				const float *curr = rect_depth_test->buf + ps->cache.sub_rect.start;
				for (unsigned int i = 0; i < ps->cache.sub_rect.len; i++) {
					const float *prev_final = prev + ps->cache.sub_rect.span;
					for (; prev < prev_final; prev++, curr++, id_ptr++) {
						UPDATE_ID_TEST();
					}
					prev += ps->cache.sub_rect.skip;
					curr += ps->cache.sub_rect.skip;
				}
			}

#undef UPDATE_ID_TEST
		}
	}
}
/**
 * Begin drawing with a new object \a id:
 * first flush the depth changes made while the _previous_ id was bound.
 *
 * \return true (this backend never rejects an id).
 */
bool gpu_select_pick_load_id(unsigned int id)
{
	GPUPickState *ps = &g_pick_state;
	if (ps->gl.is_init) {
		const unsigned int rect_len = ps->src.rect_len;
		/* read the depths drawn since the last call */
		glReadPixels(UNPACK4(ps->gl.clip_readpixels), GL_DEPTH_COMPONENT, GL_FLOAT, ps->gl.rect_depth_test->buf);
		/* perform initial memcmp since most cases the array remains unchanged */
		if (memcmp(ps->gl.rect_depth->buf, ps->gl.rect_depth_test->buf, rect_len * sizeof(float)) != 0) {
			/* tag the buffer with the id that drew it, then collect its hits */
			ps->gl.rect_depth_test->id = ps->gl.prev_id;
			gpu_select_load_id_pass(ps->gl.rect_depth, ps->gl.rect_depth_test);

			/* Store depth in cache */
			if (ps->use_cache) {
				BLI_addtail(&ps->cache.bufs, ps->gl.rect_depth);
				ps->gl.rect_depth = depth_buf_malloc(ps->src.rect_len);
			}
			/* the newly read buffer becomes the reference for the next pass */
			SWAP(DepthBufCache *, ps->gl.rect_depth_test, ps->gl.rect_depth);
		}
	}

	ps->gl.is_init = true;
	ps->gl.prev_id = id;

	return true;
}
| unsigned int gpu_select_pick_end(void) | |||||
| { | |||||
| GPUPickState *ps = &g_pick_state; | |||||
| if (ps->is_cached == false) { | |||||
| if (ps->gl.is_init) { | |||||
| /* force finishing last pass */ | |||||
| gpu_select_pick_load_id(ps->gl.prev_id); | |||||
| } | |||||
| glPopAttrib(); | |||||
| glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE); | |||||
| } | |||||
| /* assign but never free directly since it may be in cache */ | |||||
| DepthBufCache *rect_depth_final; | |||||
| /* Store depth in cache */ | |||||
| if (ps->use_cache && !ps->is_cached) { | |||||
| BLI_addtail(&ps->cache.bufs, ps->gl.rect_depth); | |||||
| ps->gl.rect_depth = NULL; | |||||
| rect_depth_final = ps->cache.bufs.last; | |||||
| } | |||||
| else if (ps->is_cached) { | |||||
| rect_depth_final = ps->cache.bufs.last; | |||||
| } | |||||
| else { | |||||
| /* common case, no cache */ | |||||
| rect_depth_final = ps->gl.rect_depth; | |||||
| } | |||||
| unsigned int maxhits = g_pick_state.bufsize; | |||||
| DepthID *depth_data; | |||||
| unsigned int depth_data_len = 0; | |||||
| if (g_pick_state.mode == GPU_SELECT_PICK_SORT_ALL) { | |||||
| depth_data = ps->all.hits; | |||||
| depth_data_len = ps->all.hits_len; | |||||
| /* move ownership */ | |||||
| ps->all.hits = NULL; | |||||
| ps->all.hits_len = 0; | |||||
| ps->all.hits_len_alloc = 0; | |||||
| } | |||||
| else { | |||||
| /* GPU_SELECT_PICK_SORT_NEAREST */ | |||||
| /* Over alloc (unlikely we have as many depths as pixels) */ | |||||
| unsigned int depth_data_len_first_pass = 0; | |||||
| depth_data = MEM_mallocN(ps->dst.rect_len * sizeof(*depth_data), __func__); | |||||
| /* Partially de-duplicating copy, | |||||
| * when contiguous ID's are found - update their closest depth. | |||||
| * This isn't essential but means there is less data to sort. */ | |||||
| #define STORE_ID_TEST(i_src, i_dst) \ | |||||
| { \ | |||||
| const unsigned int id = ps->nearest.rect_id[i_dst]; \ | |||||
| if (id != SELECT_ID_NONE) { \ | |||||
| const float depth = rect_depth_final->buf[i_src]; \ | |||||
| if (depth_last == NULL || depth_last->id != id) { \ | |||||
| DepthID *d = &depth_data[depth_data_len_first_pass++]; \ | |||||
| d->id = id; \ | |||||
| d->depth = depth; \ | |||||
| } \ | |||||
| else if (depth_last->depth > depth) { \ | |||||
| depth_last->depth = depth; \ | |||||
| } \ | |||||
| } \ | |||||
| } ((void)0) | |||||
| { | |||||
| DepthID *depth_last = NULL; | |||||
| if (ps->is_cached == false) { | |||||
| for (unsigned int i = 0; i < ps->src.rect_len; i++) { | |||||
| STORE_ID_TEST(i, i); | |||||
| } | |||||
| } | |||||
| else { | |||||
| /* same as above but different rect sizes */ | |||||
| unsigned int i_src = ps->cache.sub_rect.start, i_dst = 0; | |||||
| for (unsigned int j = 0; j < ps->cache.sub_rect.len; j++) { | |||||
| const unsigned int i_src_final = i_src + ps->cache.sub_rect.span; | |||||
| for (; i_src < i_src_final; i_src++, i_dst++) { | |||||
| STORE_ID_TEST(i_src, i_dst); | |||||
| } | |||||
| i_src += ps->cache.sub_rect.skip; | |||||
| } | |||||
| } | |||||
| } | |||||
| #undef STORE_ID_TEST | |||||
| qsort(depth_data, depth_data_len_first_pass, sizeof(DepthID), depth_id_cmp); | |||||
| /* Sort by ID's then keep the best depth for each ID */ | |||||
| depth_data_len = 0; | |||||
| { | |||||
| DepthID *depth_last = NULL; | |||||
| for (unsigned int i = 0; i < depth_data_len_first_pass; i++) { | |||||
| if (depth_last == NULL || depth_last->id != depth_data[i].id) { | |||||
| depth_last = &depth_data[depth_data_len++]; | |||||
| *depth_last = depth_data[i]; | |||||
| } | |||||
| else if (depth_last->depth > depth_data[i].depth) { | |||||
| depth_last->depth = depth_data[i].depth; | |||||
| } | |||||
| } | |||||
| } | |||||
| } | |||||
| /* Finally sort each unique (id, depth) pair by depth | |||||
| * so the final hit-list is sorted by depth (nearest first) */ | |||||
| unsigned int hits = 0; | |||||
| qsort(depth_data, depth_data_len, sizeof(DepthID), depth_cmp); | |||||
| for (unsigned int i = 0; i < depth_data_len; i++) { | |||||
| if (hits < maxhits) { | |||||
| /* first 3 are dummy values */ | |||||
| g_pick_state.buffer[hits][0] = 1; | |||||
| g_pick_state.buffer[hits][1] = 0x0; | |||||
| g_pick_state.buffer[hits][2] = 0x0; | |||||
| g_pick_state.buffer[hits][3] = depth_data[i].id; | |||||
| hits++; | |||||
| } | |||||
| else { | |||||
| hits = -1; | |||||
| break; | |||||
| } | |||||
| } | |||||
| MEM_freeN(depth_data); | |||||
| MEM_SAFE_FREE(ps->gl.rect_depth); | |||||
| MEM_SAFE_FREE(ps->gl.rect_depth_test); | |||||
| if (g_pick_state.mode == GPU_SELECT_PICK_SORT_ALL) { | |||||
| /* 'hits' already freed as 'depth_data' */ | |||||
| } | |||||
| else { | |||||
| MEM_freeN(ps->nearest.rect_id); | |||||
| ps->nearest.rect_id = NULL; | |||||
| } | |||||
| if (ps->use_cache) { | |||||
| ps->is_cached = true; | |||||
| } | |||||
| return hits; | |||||
| } | |||||
| /* ---------------------------------------------------------------------------- | |||||
| * Caching | |||||
| * | |||||
| * Support multiple begin/end's reusing depth buffers. | |||||
| */ | |||||
| void gpu_select_pick_cache_begin(void) | |||||
| { | |||||
| BLI_assert(g_pick_state.use_cache == false); | |||||
| #ifdef DEBUG_PRINT | |||||
| printf("%s\n", __func__); | |||||
| #endif | |||||
| g_pick_state.use_cache = true; | |||||
| g_pick_state.is_cached = false; | |||||
| } | |||||
| void gpu_select_pick_cache_end(void) | |||||
| { | |||||
| #ifdef DEBUG_PRINT | |||||
| printf("%s: with %d buffers\n", __func__, BLI_listbase_count(&g_pick_state.cache.bufs)); | |||||
| #endif | |||||
| g_pick_state.use_cache = false; | |||||
| g_pick_state.is_cached = false; | |||||
| BLI_freelistN(&g_pick_state.cache.bufs); | |||||
| } | |||||
| /* is drawing needed? */ | |||||
| bool gpu_select_pick_is_cached(void) | |||||
| { | |||||
| return g_pick_state.is_cached; | |||||
| } | |||||
| void gpu_select_pick_cache_load_id(void) | |||||
| { | |||||
| BLI_assert(g_pick_state.is_cached == true); | |||||
| GPUPickState *ps = &g_pick_state; | |||||
| for (DepthBufCache *rect_depth = ps->cache.bufs.first; rect_depth; rect_depth = rect_depth->next) { | |||||
| if (rect_depth->next != NULL) { | |||||
| #ifdef DEBUG_PRINT | |||||
| printf("%s: using cached depth buffer\n", __func__); | |||||
| #endif | |||||
| gpu_select_load_id_pass(rect_depth, rect_depth->next); | |||||
| } | |||||
| } | |||||
| } | |||||