Page MenuHome

Fix T77346: Win/Intel GPU crashes material preview and final render
AbandonedPublic

Authored by Jeroen Bakker (jbakker) on Jul 6 2020, 9:33 AM.

Details

Summary

When not running in the main thread, the local shaders workaround would
remove the compiled shader and load it when it was used from the main
thread. During this time the gpu interface could not be used. In the
recent eevee cleanup the gpu interface was needed to bind the resources.

This patch will keep the shader around until the shader is replaced
during drawing. This patch assumes that the binding can still be reused,
as the shaders are the same. During testing, I didn't notice any issues.

In order to test this, apply the following diff:

diff --git a/source/blender/gpu/intern/gpu_extensions.c b/source/blender/gpu/intern/gpu_extensions.c
index 4e4e9c526d0..03dd1956fa9 100644
--- a/source/blender/gpu/intern/gpu_extensions.c
+++ b/source/blender/gpu/intern/gpu_extensions.c
@@ -224,7 +224,7 @@ bool GPU_unused_fb_slot_workaround(void)
 
 bool GPU_context_local_shaders_workaround(void)
 {
-  return GG.context_local_shaders_workaround;
+  return true;  // GG.context_local_shaders_workaround;
 }
 
 bool GPU_texture_copy_workaround(void)

Diff Detail

Repository
rB Blender
Branch
T77346 (branched from master)
Build Status
Buildable 8854
Build 8854: arc lint + arc unit

Event Timeline

Jeroen Bakker (jbakker) requested review of this revision.Jul 6 2020, 9:33 AM

Spelling in comments

Clément Foucault (fclem) requested changes to this revision.Jul 6 2020, 3:21 PM

If I understand correctly, you are deleting a shader from a context other than the one it was created in. It might be an issue if the context is destroyed before you free the shader (see WM_opengl_context_dispose(comp->gl_context);).

This revision now requires changes to proceed.Jul 6 2020, 3:21 PM

I did some more experiments, but they all seem to fail somewhere. The following attempt fails when eevee wants to attach the program (which has been unloaded):

BLI_assert failed: /home/jeroen/blender-git/blender/source/blender/gpu/intern/gpu_shader.c:672, GPU_shader_bind(), at 'shader && shader->program'

It is called before the shaders have been loaded via the main thread. It seems like we shouldn't support this workaround anymore in 2.90.

diff --git a/source/blender/gpu/GPU_shader.h b/source/blender/gpu/GPU_shader.h
index 0ad472113c9..f9a3c7864c7 100644
--- a/source/blender/gpu/GPU_shader.h
+++ b/source/blender/gpu/GPU_shader.h
@@ -68,6 +68,12 @@ GPUShader *GPU_shader_load_from_binary(const char *binary,
                                        const int binary_format,
                                        const int binary_len,
                                        const char *shname);
+bool GPU_shader_reload_from_binary(GPUShader *shader,
+                                   const char *binary,
:...skipping...
diff --git a/source/blender/gpu/GPU_shader.h b/source/blender/gpu/GPU_shader.h
index 0ad472113c9..f9a3c7864c7 100644
--- a/source/blender/gpu/GPU_shader.h
+++ b/source/blender/gpu/GPU_shader.h
@@ -68,6 +68,12 @@ GPUShader *GPU_shader_load_from_binary(const char *binary,
                                        const int binary_format,
                                        const int binary_len,
                                        const char *shname);
+bool GPU_shader_reload_from_binary(GPUShader *shader,
+                                   const char *binary,
+                                   const int binary_format,
+                                   const int binary_len,
+                                   const char *shname);
+
 struct GPU_ShaderCreateFromArray_Params {
   const char **vert, **geom, **frag, **defs;
 };
@@ -77,6 +83,7 @@ struct GPUShader *GPU_shader_create_from_arrays_impl(
   GPU_shader_create_from_arrays_impl(&(const struct GPU_ShaderCreateFromArray_Params)__VA_ARGS__)
 
 void GPU_shader_free(GPUShader *shader);
+void GPU_shader_free_gpu_resources(GPUShader *shader);
 
 void GPU_shader_bind(GPUShader *shader);
 void GPU_shader_unbind(void);
diff --git a/source/blender/gpu/intern/gpu_codegen.c b/source/blender/gpu/intern/gpu_codegen.c
index c1e7933d7ba..6cbc54c1bf7 100644
--- a/source/blender/gpu/intern/gpu_codegen.c
+++ b/source/blender/gpu/intern/gpu_codegen.c
@@ -1229,16 +1229,15 @@ bool GPU_pass_compile(GPUPass *pass, const char *shname)
     else if (!BLI_thread_is_main() && GPU_context_local_shaders_workaround()) {
       pass->binary.content = GPU_shader_get_binary(
           shader, &pass->binary.format, &pass->binary.len);
-      GPU_shader_free(shader);
-      shader = NULL;
+      GPU_shader_free_gpu_resources(shader);
     }
 
     pass->shader = shader;
     pass->compiled = true;
   }
   else if (pass->binary.content && BLI_thread_is_main()) {
-    pass->shader = GPU_shader_load_from_binary(
-        pass->binary.content, pass->binary.format, pass->binary.len, shname);
+    GPU_shader_reload_from_binary(
+        pass->shader, pass->binary.content, pass->binary.format, pass->binary.len, shname);
     MEM_SAFE_FREE(pass->binary.content);
   }
 
diff --git a/source/blender/gpu/intern/gpu_extensions.c b/source/blender/gpu/intern/gpu_extensions.c
index 0fd373d37a3..663728f3a25 100644
--- a/source/blender/gpu/intern/gpu_extensions.c
+++ b/source/blender/gpu/intern/gpu_extensions.c
@@ -224,7 +224,7 @@ bool GPU_unused_fb_slot_workaround(void)
 
 bool GPU_context_local_shaders_workaround(void)
 {
-  return GG.context_local_shaders_workaround;
+  return true;  // GG.context_local_shaders_workaround;
 }
 
 bool GPU_texture_copy_workaround(void)
diff --git a/source/blender/gpu/intern/gpu_shader.c b/source/blender/gpu/intern/gpu_shader.c
index 669b073232d..db52eb79241 100644
--- a/source/blender/gpu/intern/gpu_shader.c
+++ b/source/blender/gpu/intern/gpu_shader.c
@@ -317,10 +317,11 @@ GPUShader *GPU_shader_create_from_python(const char *vertexcode,
   return sh;
 }
 
-GPUShader *GPU_shader_load_from_binary(const char *binary,
-                                       const int binary_format,
-                                       const int binary_len,
-                                       const char *shname)
+bool GPU_shader_reload_from_binary(GPUShader *shader,
+                                   const char *binary,
+                                   const int binary_format,
+                                   const int binary_len,
+                                   const char *shname)
 {
   BLI_assert(GL_ARB_get_program_binary);
   int success;
@@ -330,9 +331,10 @@ GPUShader *GPU_shader_load_from_binary(const char *binary,
   glGetProgramiv(program, GL_LINK_STATUS, &success);
 
   if (success) {
+    if (shader->interface) {
+      GPU_shaderinterface_discard(shader->interface);
+    }
     glUseProgram(program);
-
-    GPUShader *shader = MEM_callocN(sizeof(*shader), __func__);
     shader->interface = GPU_shaderinterface_create(program);
     shader->program = program;
 
@@ -341,12 +343,23 @@ GPUShader *GPU_shader_load_from_binary(const char *binary,
 #else
     UNUSED_VARS(shname);
 #endif
-
-    return shader;
+    return true;
   }
-
   glDeleteProgram(program);
-  return NULL;
+  return false;
+}
+
+GPUShader *GPU_shader_load_from_binary(const char *binary,
+                                       const int binary_format,
+                                       const int binary_len,
+                                       const char *shname)
+{
+  GPUShader *shader = MEM_callocN(sizeof(*shader), __func__);
+  if (!GPU_shader_reload_from_binary(shader, binary, binary_format, binary_len, shname)) {
+    MEM_freeN(shader);
+    return NULL;
+  }
+  return shader;
 }
 
 #define DEBUG_SHADER_NONE ""
@@ -696,26 +709,34 @@ void GPU_shader_transform_feedback_disable(GPUShader *UNUSED(shader))
   glEndTransformFeedback();
 }
 
-void GPU_shader_free(GPUShader *shader)
+void GPU_shader_free_gpu_resources(GPUShader *shader)
 {
-#if 0 /* Would be nice to have, but for now the Deferred compilation \
-       * does not have a GPUContext. */
-  BLI_assert(GPU_context_active_get() != NULL);
-#endif
   BLI_assert(shader);
 
   if (shader->vertex) {
     glDeleteShader(shader->vertex);
+    shader->vertex = 0;
   }
   if (shader->geometry) {
     glDeleteShader(shader->geometry);
+    shader->geometry = 0;
   }
   if (shader->fragment) {
     glDeleteShader(shader->fragment);
+    shader->fragment = 0;
   }
   if (shader->program) {
     glDeleteProgram(shader->program);
+    shader->program = 0;
   }
+}
+void GPU_shader_free(GPUShader *shader)
+{
+#if 0 /* Would be nice to have, but for now the Deferred compilation \
+       * does not have a GPUContext. */
+  BLI_assert(GPU_context_active_get() != NULL);
+#endif
+  GPU_shader_free_gpu_resources(shader);
 
   if (shader->interface) {
     GPU_shaderinterface_discard(shader->interface);
@@ -726,40 +747,40 @@ void GPU_shader_free(GPUShader *shader)
 
 int GPU_shader_get_uniform(GPUShader *shader, const char *name)
 {
-  BLI_assert(shader && shader->program);
+  BLI_assert(shader && shader->interface);
   const GPUShaderInput *uniform = GPU_shaderinterface_uniform(shader->interface, name);
   return uniform ? uniform->location : -1;
 }
 
 int GPU_shader_get_builtin_uniform(GPUShader *shader, int builtin)
 {
-  BLI_assert(shader && shader->program);
+  BLI_assert(shader && shader->interface);
   return GPU_shaderinterface_uniform_builtin(shader->interface, builtin);
 }
 
 int GPU_shader_get_builtin_block(GPUShader *shader, int builtin)
 {
-  BLI_assert(shader && shader->program);
+  BLI_assert(shader && shader->interface);
   return GPU_shaderinterface_block_builtin(shader->interface, builtin);
 }
 
 int GPU_shader_get_uniform_block(GPUShader *shader, const char *name)
 {
-  BLI_assert(shader && shader->program);
+  BLI_assert(shader && shader->interface);
   const GPUShaderInput *ubo = GPU_shaderinterface_ubo(shader->interface, name);
   return ubo ? ubo->location : -1;
 }
 
 int GPU_shader_get_uniform_block_binding(GPUShader *shader, const char *name)
 {
-  BLI_assert(shader && shader->program);
+  BLI_assert(shader && shader->interface);
   const GPUShaderInput *ubo = GPU_shaderinterface_ubo(shader->interface, name);
   return ubo ? ubo->binding : -1;
 }
 
 int GPU_shader_get_texture_binding(GPUShader *shader, const char *name)
 {
-  BLI_assert(shader && shader->program);
+  BLI_assert(shader && shader->interface);
   const GPUShaderInput *tex = GPU_shaderinterface_uniform(shader->interface, name);
   return tex ? tex->binding : -1;
 }
@@ -861,7 +882,7 @@ void GPU_shader_set_srgb_uniform(const GPUShaderInterface *interface)
 
 int GPU_shader_get_attribute(GPUShader *shader, const char *name)
 {
-  BLI_assert(shader && shader->program);
+  BLI_assert(shader && shader->interface);
   const GPUShaderInput *attr = GPU_shaderinterface_attr(shader->interface, name);
   return attr ? attr->location : -1;
 }