source/blender/draw/intern/draw_manager_shader.c
… (first 45 lines not shown) …

/** \name Deferred Compilation (DRW_deferred)
 *
 * Since compiling shaders can take a long time, we do it in a non-blocking
 * manner in another thread.
 *
 * \{ */

typedef struct DRWShaderCompiler {
  /** Default compilation queue. */
  ListBase queue; /* GPUMaterial */
  SpinLock list_lock;
  /** Optimization queue. */
  ListBase optimize_queue; /* GPUMaterial */

  void *gl_context;
  GPUContext *gpu_context;
  bool own_context;
} DRWShaderCompiler;

static void drw_deferred_shader_compilation_exec(
    void *custom_data,
    /* Cannot be const, this function implements wm_jobs_start_callback. … */

… (39 lines not shown) …

  while (true) {
    if (mat) {
      /* Do the compilation. */
      GPU_material_compile(mat);
      GPU_material_release(mat);
      MEM_freeN(link);
    }
    else {
      /* Check for Material Optimization job once there are no more
       * shaders to compile. */
      BLI_spin_lock(&comp->list_lock);
      /* Pop tail because it will be less likely to lock the main thread
       * if all GPUMaterials are to be freed (see DRW_deferred_shader_remove()). */
      LinkData *link = (LinkData *)BLI_poptail(&comp->optimize_queue);
      GPUMaterial *optimize_mat = link ? (GPUMaterial *)link->data : NULL;
      if (optimize_mat) {
        /* Avoid another thread freeing the material during optimization. */
        GPU_material_acquire(optimize_mat);
      }
      BLI_spin_unlock(&comp->list_lock);

      if (optimize_mat) {
        /* Compile optimized material shader. */
        GPU_material_optimize(optimize_mat);
        GPU_material_release(optimize_mat);
        MEM_freeN(link);
      }
      else {
        /* No more materials to optimize, or shaders to compile. */
        break;
      }
    }

    if (GPU_type_matches_ex(GPU_DEVICE_ANY, GPU_OS_ANY, GPU_DRIVER_ANY, GPU_BACKEND_OPENGL)) {
      GPU_flush();
    }
  }

  GPU_context_active_set(NULL);
  WM_opengl_context_release(gl_context);
  if (use_main_context_workaround) {
    GPU_context_main_unlock();
  }
  GPU_render_end();
}

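Review note, illustration only and not part of the diff: the worker above always drains the default compilation queue before touching the optimization queue, and it takes a reference while the spin lock is still held so that a cancelling thread cannot free the material during the slow work. A minimal sketch of that consume pattern, with plain pthreads and hypothetical `Work`/queue names instead of the Blender API (details such as popping from the tail of the optimization queue are left out):

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-in for GPUMaterial entries in the two queues. */
typedef struct Work {
  struct Work *next;
  int users;        /* reference count, mirrors GPU_material_acquire()/release() */
  const char *name;
} Work;

static pthread_spinlock_t lock;
static Work *compile_head = NULL;  /* default compilation queue */
static Work *optimize_head = NULL; /* optimization queue */

static Work *pop(Work **head)
{
  Work *w = *head;
  if (w) {
    *head = w->next;
  }
  return w;
}

static void worker_step(void)
{
  pthread_spin_lock(&lock);
  int is_optimization = 0;
  Work *w = pop(&compile_head);
  if (w == NULL) {
    /* Only look at the optimization queue once no compilations are pending. */
    w = pop(&optimize_head);
    is_optimization = 1;
  }
  if (w) {
    /* Take a reference while the lock is still held, so a cancelling thread
     * cannot free the item between the unlock and the slow work below. */
    w->users++;
  }
  pthread_spin_unlock(&lock);

  if (w) {
    printf("%s %s\n", is_optimization ? "optimizing" : "compiling", w->name);
    w->users--; /* release */
  }
}

int main(void)
{
  pthread_spin_init(&lock, PTHREAD_PROCESS_PRIVATE);
  Work a = {NULL, 1, "material A"};
  Work b = {NULL, 1, "material B"};
  compile_head = &a;
  optimize_head = &b;
  worker_step(); /* compiles A first */
  worker_step(); /* then optimizes B, because the compile queue is now empty */
  pthread_spin_destroy(&lock);
  return 0;
}
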
static void drw_deferred_shader_compilation_free(void *custom_data)
{
  DRWShaderCompiler *comp = (DRWShaderCompiler *)custom_data;

  BLI_spin_lock(&comp->list_lock);
  BLI_freelistN(&comp->queue);
  BLI_freelistN(&comp->optimize_queue);
  BLI_spin_unlock(&comp->list_lock);

  if (comp->own_context) {
    /* Only destroy if the job owns the context. */
    WM_opengl_context_activate(comp->gl_context);
    GPU_context_active_set(comp->gpu_context);
    GPU_context_discard(comp->gpu_context);
    WM_opengl_context_dispose(comp->gl_context);
    wm_window_reset_drawable();
  }

  MEM_freeN(comp);
}

/**
 * Append either a shader compilation or an optimization job to the deferred queue and
 * ensure the shader compilation worker is active.
 * We keep two separate queues to ensure core compilations always complete before optimization.
 */
static void drw_deferred_queue_append(GPUMaterial *mat, bool is_optimization_job)
{
  const bool use_main_context = GPU_use_main_context_workaround();
  const bool job_own_context = !use_main_context;

  BLI_assert(DST.draw_ctx.evil_C);
  wmWindowManager *wm = CTX_wm_manager(DST.draw_ctx.evil_C);
  wmWindow *win = CTX_wm_window(DST.draw_ctx.evil_C);

  /* Get the running job or a new one if none is running. Can only have one job per type & owner. */
  wmJob *wm_job = WM_jobs_get(
      wm, win, wm, "Shaders Compilation", 0, WM_JOB_TYPE_SHADER_COMPILATION);

  DRWShaderCompiler *old_comp = (DRWShaderCompiler *)WM_jobs_customdata_get(wm_job);

  DRWShaderCompiler *comp = MEM_callocN(sizeof(DRWShaderCompiler), "DRWShaderCompiler");
  BLI_spin_init(&comp->list_lock);

  if (old_comp) {
    BLI_spin_lock(&old_comp->list_lock);
    BLI_movelisttolist(&comp->queue, &old_comp->queue);
    BLI_movelisttolist(&comp->optimize_queue, &old_comp->optimize_queue);
    BLI_spin_unlock(&old_comp->list_lock);
    /* Do not recreate context, just pass ownership. */
    if (old_comp->gl_context) {
      comp->gl_context = old_comp->gl_context;
      comp->gpu_context = old_comp->gpu_context;
      old_comp->own_context = false;
      comp->own_context = job_own_context;
    }
  }

  /* Add to either compilation or optimization queue. */
  if (is_optimization_job) {
    BLI_assert(GPU_material_optimization_status(mat) != GPU_MAT_OPTIMIZATION_QUEUED);
    GPU_material_optimization_status_set(mat, GPU_MAT_OPTIMIZATION_QUEUED);
    LinkData *node = BLI_genericNodeN(mat);
    BLI_addtail(&comp->optimize_queue, node);
  }
  else {
    GPU_material_status_set(mat, GPU_MAT_QUEUED);
    LinkData *node = BLI_genericNodeN(mat);
    BLI_addtail(&comp->queue, node);
  }

  /* Create only one context. */
  if (comp->gl_context == NULL) {
    if (use_main_context) {
      comp->gl_context = DST.gl_context;
      comp->gpu_context = DST.gpu_context;
    }
    else {
      comp->gl_context = WM_opengl_context_create();
      comp->gpu_context = GPU_context_create(NULL, comp->gl_context);
      GPU_context_active_set(NULL);

      WM_opengl_context_activate(DST.gl_context);
      GPU_context_active_set(DST.gpu_context);
    }
    comp->own_context = job_own_context;
  }

  WM_jobs_customdata_set(wm_job, comp, drw_deferred_shader_compilation_free);
  WM_jobs_timer(wm_job, 0.1, NC_MATERIAL | ND_SHADING_DRAW, 0);
  WM_jobs_delay_start(wm_job, 0.1);
  WM_jobs_callbacks(wm_job, drw_deferred_shader_compilation_exec, NULL, NULL, NULL);

  G.is_break = false;

  WM_jobs_start(wm, wm_job);
}

static void drw_deferred_shader_add(GPUMaterial *mat, bool deferred)
{
  if (ELEM(GPU_material_status(mat), GPU_MAT_SUCCESS, GPU_MAT_FAILED)) {
    return;
  }

  /* Do not defer the compilation if we are rendering an image.
   * Deferred compilation is only possible when `evil_C` is available. */
  if (DST.draw_ctx.evil_C == NULL || DRW_state_is_image_render() || !USE_DEFERRED_COMPILATION) {
    deferred = false;
  }

  if (!deferred) {
    DRW_deferred_shader_remove(mat);
    /* Shaders could already be compiling. Have to wait for compilation to finish. */
    while (GPU_material_status(mat) == GPU_MAT_QUEUED) {
      PIL_sleep_ms(20);
    }
    if (GPU_material_status(mat) == GPU_MAT_CREATED) {
      GPU_material_compile(mat);
    }
    return;
  }

  /* Don't add material to the queue twice. */
  if (GPU_material_status(mat) == GPU_MAT_QUEUED) {
    return;
  }

  /* Add deferred shader compilation to queue. */
  drw_deferred_queue_append(mat, false);
}

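Review note, illustration only and not part of the diff: the function above is driven entirely by the material status. A minimal sketch of that control flow with hypothetical types and stub helpers standing in for the real queue/compile calls:

#include <stdio.h>

typedef enum { MAT_CREATED, MAT_QUEUED, MAT_SUCCESS, MAT_FAILED } Status;
typedef struct { Status status; } Material;

/* Stubs standing in for the real queue / compile / sleep calls. */
static void enqueue_for_worker(Material *m)       { (void)m; printf("enqueued\n"); }
static void remove_from_worker_queue(Material *m) { if (m->status == MAT_QUEUED) { m->status = MAT_CREATED; } }
static void compile_now(Material *m)              { m->status = MAT_SUCCESS; printf("compiled synchronously\n"); }
static void sleep_ms(int ms)                      { (void)ms; }

static void add_material(Material *mat, int deferred)
{
  if (mat->status == MAT_SUCCESS || mat->status == MAT_FAILED) {
    return; /* nothing to do */
  }
  if (!deferred) {
    /* Blocking fallback: pull the material out of the pending queue; if the
     * worker already picked it up, the status stays QUEUED until that
     * compilation finishes, so wait it out before compiling here. */
    remove_from_worker_queue(mat);
    while (mat->status == MAT_QUEUED) {
      sleep_ms(20);
    }
    if (mat->status == MAT_CREATED) {
      compile_now(mat);
    }
    return;
  }
  /* Never add the same material to the queue twice. */
  if (mat->status == MAT_QUEUED) {
    return;
  }
  mat->status = MAT_QUEUED;
  enqueue_for_worker(mat);
}

int main(void)
{
  Material m = {MAT_CREATED};
  add_material(&m, 1); /* deferred: queued for the worker */
  add_material(&m, 1); /* no-op: already queued */
  add_material(&m, 0); /* non-deferred: de-queues and compiles in place */
  return 0;
}
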
void DRW_deferred_shader_remove(GPUMaterial *mat)
{
  LISTBASE_FOREACH (wmWindowManager *, wm, &G_MAIN->wm) {
    LISTBASE_FOREACH (wmWindow *, win, &wm->windows) {
      DRWShaderCompiler *comp = (DRWShaderCompiler *)WM_jobs_customdata_from_type(
          wm, wm, WM_JOB_TYPE_SHADER_COMPILATION);
      if (comp != NULL) {
        BLI_spin_lock(&comp->list_lock);

        /* Search for compilation job in queue. */
        LinkData *link = (LinkData *)BLI_findptr(&comp->queue, mat, offsetof(LinkData, data));
        if (link) {
          BLI_remlink(&comp->queue, link);
          GPU_material_status_set(link->data, GPU_MAT_CREATED);
        }
        MEM_SAFE_FREE(link);

        /* Search for optimization job in queue. */
        LinkData *opti_link = (LinkData *)BLI_findptr(
            &comp->optimize_queue, mat, offsetof(LinkData, data));
        if (opti_link) {
          BLI_remlink(&comp->optimize_queue, opti_link);
          GPU_material_optimization_status_set(opti_link->data, GPU_MAT_OPTIMIZATION_READY);
        }
        BLI_spin_unlock(&comp->list_lock);
        MEM_SAFE_FREE(opti_link);
      }
    }
  }
}

void DRW_deferred_shader_optimize_remove(GPUMaterial *mat)
{
  LISTBASE_FOREACH (wmWindowManager *, wm, &G_MAIN->wm) {
    LISTBASE_FOREACH (wmWindow *, win, &wm->windows) {
      DRWShaderCompiler *comp = (DRWShaderCompiler *)WM_jobs_customdata_from_type(
          wm, wm, WM_JOB_TYPE_SHADER_COMPILATION);
      if (comp != NULL) {
        BLI_spin_lock(&comp->list_lock);
        /* Search for optimization job in queue. */
        LinkData *opti_link = (LinkData *)BLI_findptr(
            &comp->optimize_queue, mat, offsetof(LinkData, data));
        if (opti_link) {
          BLI_remlink(&comp->optimize_queue, opti_link);
          GPU_material_optimization_status_set(opti_link->data, GPU_MAT_OPTIMIZATION_READY);
        }
        BLI_spin_unlock(&comp->list_lock);
        MEM_SAFE_FREE(opti_link);
      }
    }
  }
}

/** \} */

/* -------------------------------------------------------------------- */

… (117 lines not shown) …

  GPUMaterial *mat = GPU_material_from_nodetree(scene,
                                                /* … */
                                                callback,
                                                thunk);
  if (DRW_state_is_image_render()) {
    /* Do not defer if doing a render. */
    deferred = false;
  }
  drw_deferred_shader_add(mat, deferred);
  DRW_shader_queue_optimize_material(mat);
  return mat;
}

GPUMaterial *DRW_shader_from_material(Material *ma,
                                      struct bNodeTree *ntree,
                                      const uint64_t shader_id,
                                      const bool is_volume_shader,
                                      bool deferred,

… (13 lines not shown) …

  GPUMaterial *mat = GPU_material_from_nodetree(scene,
                                                /* … */
                                                thunk);
  if (DRW_state_is_image_render()) {
    /* Do not defer if doing a render. */
    deferred = false;
  }
  drw_deferred_shader_add(mat, deferred);
  DRW_shader_queue_optimize_material(mat);
  return mat;
}

void DRW_shader_queue_optimize_material(GPUMaterial *mat)
{
  /* Do not perform deferred optimization when performing a render.
   * De-queue any queued optimization jobs. */
  if (DRW_state_is_image_render()) {
    if (GPU_material_optimization_status(mat) == GPU_MAT_OPTIMIZATION_QUEUED) {
      /* Remove from the pending optimization job queue. */
      DRW_deferred_shader_optimize_remove(mat);
      /* If the optimization job has already started, wait for it to complete. */
      while (GPU_material_optimization_status(mat) == GPU_MAT_OPTIMIZATION_QUEUED) {
        PIL_sleep_ms(20);
      }
    }
    return;
  }

  /* We do not need to optimize the material if it has already been optimized or is already in
   * the optimization queue. If optimization is not required, the status will be flagged as
   * `GPU_MAT_OPTIMIZATION_SKIP`. */
  if (ELEM(GPU_material_optimization_status(mat),
           GPU_MAT_OPTIMIZATION_SKIP,
           GPU_MAT_OPTIMIZATION_SUCCESS,
           GPU_MAT_OPTIMIZATION_QUEUED)) {
    return;
  }

  /* Only queue optimization once the original shader has been successfully compiled. */
  if (GPU_material_status(mat) != GPU_MAT_SUCCESS) {
    return;
  }

  /* Defer optimization until sufficient time has passed beyond creation. This avoids excessive
   * recompilation for shaders which are being actively modified. */
  if (!GPU_material_optimization_ready(mat)) {
    return;
  }

  /* Add deferred shader optimization to queue. */
  drw_deferred_queue_append(mat, true);
}

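Review note, illustration only and not part of the diff: the gating above boils down to a small predicate. The sketch below restates it outside the GPU_material_* API, with hypothetical names; the time threshold is an arbitrary example value, not the one Blender uses.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical status values mirroring GPU_MAT_OPTIMIZATION_*. */
typedef enum {
  OPT_SKIP,    /* this material does not benefit from optimization */
  OPT_READY,   /* eligible, not queued yet */
  OPT_QUEUED,
  OPT_SUCCESS,
} OptStatus;

typedef struct {
  bool base_compiled;  /* the original shader compiled successfully */
  OptStatus opt_status;
  double created_time; /* seconds since some reference point */
} MaterialInfo;

/* Stand-in for GPU_material_optimization_ready(): only optimize once the
 * material has been left unchanged for a while, so shaders that are being
 * actively edited are not recompiled over and over. */
static bool optimization_ready(const MaterialInfo *m, double now)
{
  return (now - m->created_time) >= 1.5; /* arbitrary example threshold */
}

static bool should_queue_optimization(const MaterialInfo *m, double now)
{
  if (m->opt_status == OPT_SKIP || m->opt_status == OPT_SUCCESS ||
      m->opt_status == OPT_QUEUED) {
    return false; /* nothing to do, or already in flight */
  }
  if (!m->base_compiled) {
    return false; /* optimization only starts from a successfully compiled shader */
  }
  return optimization_ready(m, now);
}

int main(void)
{
  MaterialInfo m = {true, OPT_READY, 10.0};
  printf("%d\n", should_queue_optimization(&m, 10.5)); /* 0: too soon after creation */
  printf("%d\n", should_queue_optimization(&m, 12.0)); /* 1: eligible now */
  return 0;
}
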
void DRW_shader_free(GPUShader *shader)
{
  GPU_shader_free(shader);
}

/** \} */

/* -------------------------------------------------------------------- */

… (remaining 132 lines not shown) …