intern/cycles/kernel/integrator/integrator_volume_stack.h
(… 18 lines not shown …)
 CCL_NAMESPACE_BEGIN

 /* Volume Stack
  *
  * This is an array of object/shader IDs that the current segment of the path
  * is inside of. */
 template<typename StackReadOp, typename StackWriteOp>
-ccl_device void volume_stack_enter_exit(INTEGRATOR_STATE_ARGS,
+ccl_device void volume_stack_enter_exit(KernelGlobals kg,
                                         ccl_private const ShaderData *sd,
                                         StackReadOp stack_read,
                                         StackWriteOp stack_write)
 {
   /* todo: we should have some way for objects to indicate if they want the
    * world shader to work inside them. excluding it by default is problematic
    * because non-volume objects can't be assumed to be closed manifolds */
   if (!(sd->flag & SD_HAS_VOLUME)) {
(… 43 lines not shown …)
   else {
     /* Add to the end of the stack. */
     const VolumeStack new_entry = {sd->object, sd->shader};
     const VolumeStack empty_entry = {OBJECT_NONE, SHADER_NONE};
     stack_write(i, new_entry);
     stack_write(i + 1, empty_entry);
   }
 }

-ccl_device void volume_stack_enter_exit(INTEGRATOR_STATE_ARGS, ccl_private const ShaderData *sd)
+ccl_device void volume_stack_enter_exit(KernelGlobals kg,
+                                        IntegratorState state,
+                                        ccl_private const ShaderData *sd)
 {
   volume_stack_enter_exit(
-      INTEGRATOR_STATE_PASS,
+      kg,
       sd,
-      [=](const int i) { return integrator_state_read_volume_stack(INTEGRATOR_STATE_PASS, i); },
+      [=](const int i) { return integrator_state_read_volume_stack(state, i); },
       [=](const int i, const VolumeStack entry) {
-        integrator_state_write_volume_stack(INTEGRATOR_STATE_PASS, i, entry);
+        integrator_state_write_volume_stack(state, i, entry);
       });
 }
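The templated volume_stack_enter_exit above is parameterized by stack read/write operations, which is how one piece of enter/exit logic serves both the main volume stack (wrapper above) and the shadow stack (wrapper below): only the lambdas that touch the integrator state differ, and the change in this diff routes them through the explicit kg/state parameters instead of the INTEGRATOR_STATE_PASS macro. As a rough, self-contained sketch of the pattern over a plain array (Entry, NONE and enter_exit are illustrative names, not part of the kernel; the real code also bounds-checks the stack and handles the world volume):

#include <cstdio>

/* Illustrative stand-ins for VolumeStack and OBJECT_NONE / SHADER_NONE. */
struct Entry {
  int object;
  int shader;
};
static const int NONE = -1;

/* Enter/exit in the same spirit as volume_stack_enter_exit: if the object is
 * already on the stack we are leaving it, so remove its entry; otherwise we
 * are entering it, so append an entry and keep the stack NONE-terminated. */
template<typename ReadOp, typename WriteOp>
void enter_exit(const int object, const int shader, ReadOp stack_read, WriteOp stack_write)
{
  int i = 0;
  for (;; i++) {
    const Entry entry = stack_read(i);
    if (entry.shader == NONE) {
      break; /* Hit the terminator without finding the object: this is an enter. */
    }
    if (entry.object == object) {
      /* Exit: shift the following entries (terminator included) down by one. */
      for (;; i++) {
        const Entry next = stack_read(i + 1);
        stack_write(i, next);
        if (next.shader == NONE) {
          return;
        }
      }
    }
  }
  /* Enter: add to the end of the stack and re-terminate it. */
  stack_write(i, {object, shader});
  stack_write(i + 1, {NONE, NONE});
}

int main()
{
  Entry stack[8] = {{NONE, NONE}}; /* Empty, NONE-terminated stack. */
  auto stack_read = [&](const int i) { return stack[i]; };
  auto stack_write = [&](const int i, const Entry entry) { stack[i] = entry; };

  enter_exit(3, 7, stack_read, stack_write); /* Enter object 3 (shader 7). */
  enter_exit(3, 7, stack_read, stack_write); /* Hit its far side: exit again. */
  printf("stack is empty again: %d\n", stack[0].shader == NONE); /* Prints 1. */
  return 0;
}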
-ccl_device void shadow_volume_stack_enter_exit(INTEGRATOR_STATE_ARGS,
+ccl_device void shadow_volume_stack_enter_exit(KernelGlobals kg,
+                                               IntegratorState state,
                                                ccl_private const ShaderData *sd)
 {
   volume_stack_enter_exit(
-      INTEGRATOR_STATE_PASS,
+      kg,
       sd,
-      [=](const int i) {
-        return integrator_state_read_shadow_volume_stack(INTEGRATOR_STATE_PASS, i);
-      },
+      [=](const int i) { return integrator_state_read_shadow_volume_stack(state, i); },
       [=](const int i, const VolumeStack entry) {
-        integrator_state_write_shadow_volume_stack(INTEGRATOR_STATE_PASS, i, entry);
+        integrator_state_write_shadow_volume_stack(state, i, entry);
       });
 }
 /* Clean stack after the last bounce.
  *
  * It is expected that all volumes are closed manifolds, so by the time the ray
  * hits nothing (for example, on the last bounce, which goes to the environment)
  * the only volume expected in the stack is the world's one. All other volume
  * entries should have been exited already.
  *
  * This isn't always true because of ray intersection precision issues, which
  * could leave a non-world volume in the stack indefinitely, causing render
  * artifacts.
  *
  * Use this function after the last bounce to get rid of all volumes apart from
  * the world's one, avoiding such artifacts. */
-ccl_device_inline void volume_stack_clean(INTEGRATOR_STATE_ARGS)
+ccl_device_inline void volume_stack_clean(KernelGlobals kg, IntegratorState state)
 {
   if (kernel_data.background.volume_shader != SHADER_NONE) {
     /* Keep the world's volume in stack. */
-    INTEGRATOR_STATE_ARRAY_WRITE(volume_stack, 1, shader) = SHADER_NONE;
+    INTEGRATOR_STATE_ARRAY_WRITE(state, volume_stack, 1, shader) = SHADER_NONE;
   }
   else {
-    INTEGRATOR_STATE_ARRAY_WRITE(volume_stack, 0, shader) = SHADER_NONE;
+    INTEGRATOR_STATE_ARRAY_WRITE(state, volume_stack, 0, shader) = SHADER_NONE;
   }
 }
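The clean step only needs two writes because, as the branches above suggest, the world volume (when the scene has a background volume shader) is assumed to occupy slot 0, so terminating the stack at slot 1 keeps exactly that one entry. A small standalone illustration under that assumption (Entry, NONE and stack_clean are hypothetical stand-ins, not kernel API):

#include <cstdio>

struct Entry {
  int object;
  int shader;
};
static const int NONE = -1;

/* Hypothetical analogue of volume_stack_clean: keep only the world volume,
 * which is assumed to sit in slot 0 when the scene has a world volume shader. */
void stack_clean(Entry *stack, const bool has_world_volume)
{
  if (has_world_volume) {
    stack[1].shader = NONE; /* Keep slot 0 (the world), terminate after it. */
  }
  else {
    stack[0].shader = NONE; /* No world volume: the stack becomes empty. */
  }
}

int main()
{
  /* World volume plus two leftover entries that were never exited, e.g. due
   * to ray intersection precision issues. */
  Entry stack[4] = {{0, 1}, {5, 9}, {6, 2}, {NONE, NONE}};
  stack_clean(stack, true);
  printf("only the world entry remains: %d\n",
         stack[0].shader != NONE && stack[1].shader == NONE); /* Prints 1. */
  return 0;
}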
 template<typename StackReadOp>
-ccl_device float volume_stack_step_size(INTEGRATOR_STATE_ARGS, StackReadOp stack_read)
+ccl_device float volume_stack_step_size(KernelGlobals kg,
+                                        IntegratorState state,
+                                        StackReadOp stack_read)
 {
   float step_size = FLT_MAX;
   for (int i = 0;; i++) {
     VolumeStack entry = stack_read(i);
     if (entry.shader == SHADER_NONE) {
       break;
     }
(… 30 lines not shown …)
 typedef enum VolumeSampleMethod {
   VOLUME_SAMPLE_NONE = 0,
   VOLUME_SAMPLE_DISTANCE = (1 << 0),
   VOLUME_SAMPLE_EQUIANGULAR = (1 << 1),
   VOLUME_SAMPLE_MIS = (VOLUME_SAMPLE_DISTANCE | VOLUME_SAMPLE_EQUIANGULAR),
 } VolumeSampleMethod;
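Since these values are bit flags, VOLUME_SAMPLE_MIS is simply both strategy bits set, and a caller can test each bit independently. A minimal self-contained illustration of the flag arithmetic (the enum is restated so the snippet compiles on its own; the accumulation shown is only an example of combining flags, not the collapsed body of volume_stack_sample_method below):

#include <cstdio>

/* The flag enum above, restated so this sketch compiles on its own. */
enum VolumeSampleMethod {
  VOLUME_SAMPLE_NONE = 0,
  VOLUME_SAMPLE_DISTANCE = (1 << 0),
  VOLUME_SAMPLE_EQUIANGULAR = (1 << 1),
  VOLUME_SAMPLE_MIS = (VOLUME_SAMPLE_DISTANCE | VOLUME_SAMPLE_EQUIANGULAR),
};

int main()
{
  /* Example only: OR together the strategies requested by two volumes. */
  int method = VOLUME_SAMPLE_NONE;
  method |= VOLUME_SAMPLE_DISTANCE;    /* One volume asks for distance sampling. */
  method |= VOLUME_SAMPLE_EQUIANGULAR; /* Another asks for equiangular sampling. */

  printf("distance: %d, equiangular: %d, both (MIS): %d\n",
         (method & VOLUME_SAMPLE_DISTANCE) != 0,
         (method & VOLUME_SAMPLE_EQUIANGULAR) != 0,
         method == VOLUME_SAMPLE_MIS);
  return 0;
}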
-ccl_device VolumeSampleMethod volume_stack_sample_method(INTEGRATOR_STATE_ARGS)
+ccl_device VolumeSampleMethod volume_stack_sample_method(KernelGlobals kg, IntegratorState state)
 {
   VolumeSampleMethod method = VOLUME_SAMPLE_NONE;
   for (int i = 0;; i++) {
-    VolumeStack entry = integrator_state_read_volume_stack(INTEGRATOR_STATE_PASS, i);
+    VolumeStack entry = integrator_state_read_volume_stack(state, i);
     if (entry.shader == SHADER_NONE) {
       break;
     }
     int shader_flag = kernel_tex_fetch(__shaders, (entry.shader & SHADER_MASK)).flags;
     if (shader_flag & SD_VOLUME_MIS) {
       /* Multiple importance sampling. */
(… 26 lines not shown …)