source/blender/modifiers/intern/MOD_nodes_evaluator.cc
(29 lines not shown)
| #include "BLT_translation.h" | #include "BLT_translation.h" | ||||
| #include "BLI_enumerable_thread_specific.hh" | #include "BLI_enumerable_thread_specific.hh" | ||||
| #include "BLI_stack.hh" | #include "BLI_stack.hh" | ||||
| #include "BLI_task.h" | #include "BLI_task.h" | ||||
| #include "BLI_task.hh" | #include "BLI_task.hh" | ||||
| #include "BLI_vector_set.hh" | #include "BLI_vector_set.hh" | ||||
| #include <chrono> | |||||
| namespace blender::modifiers::geometry_nodes { | namespace blender::modifiers::geometry_nodes { | ||||
| using fn::CPPType; | using fn::CPPType; | ||||
| using fn::Field; | using fn::Field; | ||||
| using fn::FieldCPPType; | |||||
| using fn::GField; | using fn::GField; | ||||
| using fn::GValueMap; | using fn::GValueMap; | ||||
| using fn::GVArray; | |||||
| using fn::ValueOrField; | |||||
| using fn::ValueOrFieldCPPType; | |||||
| using nodes::GeoNodeExecParams; | using nodes::GeoNodeExecParams; | ||||
| using namespace fn::multi_function_types; | using namespace fn::multi_function_types; | ||||
| enum class ValueUsage : uint8_t { | enum class ValueUsage : uint8_t { | ||||
| /* The value is definitely used. */ | /* The value is definitely used. */ | ||||
| Required, | Required, | ||||
| /* The value may be used. */ | /* The value may be used. */ | ||||
| Maybe, | Maybe, | ||||
(251 lines not shown)
| public: | ||||
| LockedNode(const DNode node, NodeState &node_state) : node(node), node_state(node_state) | LockedNode(const DNode node, NodeState &node_state) : node(node), node_state(node_state) | ||||
| { | { | ||||
| } | } | ||||
| }; | }; | ||||
| static const CPPType *get_socket_cpp_type(const SocketRef &socket) | static const CPPType *get_socket_cpp_type(const SocketRef &socket) | ||||
| { | { | ||||
| const bNodeSocketType *typeinfo = socket.typeinfo(); | const bNodeSocketType *typeinfo = socket.typeinfo(); | ||||
| if (typeinfo->get_geometry_nodes_cpp_type == nullptr) { | if (typeinfo->geometry_nodes_cpp_type == nullptr) { | ||||
| return nullptr; | return nullptr; | ||||
| } | } | ||||
| const CPPType *type = typeinfo->get_geometry_nodes_cpp_type(); | const CPPType *type = typeinfo->geometry_nodes_cpp_type; | ||||
| if (type == nullptr) { | if (type == nullptr) { | ||||
| return nullptr; | return nullptr; | ||||
| } | } | ||||
| /* The evaluator only supports types that have special member functions. */ | /* The evaluator only supports types that have special member functions. */ | ||||
| if (!type->has_special_member_functions()) { | if (!type->has_special_member_functions()) { | ||||
| return nullptr; | return nullptr; | ||||
| } | } | ||||
| return type; | return type; | ||||
(19 lines not shown)
| static bool get_implicit_socket_input(const SocketRef &socket, void *r_value) | ||||
| if (socket_declaration.input_field_type() == nodes::InputSocketFieldType::Implicit) { | if (socket_declaration.input_field_type() == nodes::InputSocketFieldType::Implicit) { | ||||
| const bNode &bnode = *socket.bnode(); | const bNode &bnode = *socket.bnode(); | ||||
| if (socket.typeinfo()->type == SOCK_VECTOR) { | if (socket.typeinfo()->type == SOCK_VECTOR) { | ||||
| if (bnode.type == GEO_NODE_SET_CURVE_HANDLES) { | if (bnode.type == GEO_NODE_SET_CURVE_HANDLES) { | ||||
| StringRef side = ((NodeGeometrySetCurveHandlePositions *)bnode.storage)->mode == | StringRef side = ((NodeGeometrySetCurveHandlePositions *)bnode.storage)->mode == | ||||
| GEO_NODE_CURVE_HANDLE_LEFT ? | GEO_NODE_CURVE_HANDLE_LEFT ? | ||||
| "handle_left" : | "handle_left" : | ||||
| "handle_right"; | "handle_right"; | ||||
| new (r_value) Field<float3>(bke::AttributeFieldInput::Create<float3>(side)); | new (r_value) ValueOrField<float3>(bke::AttributeFieldInput::Create<float3>(side)); | ||||
| return true; | return true; | ||||
| } | } | ||||
| new (r_value) Field<float3>(bke::AttributeFieldInput::Create<float3>("position")); | new (r_value) ValueOrField<float3>(bke::AttributeFieldInput::Create<float3>("position")); | ||||
| return true; | return true; | ||||
| } | } | ||||
| if (socket.typeinfo()->type == SOCK_INT) { | if (socket.typeinfo()->type == SOCK_INT) { | ||||
| if (ELEM(bnode.type, FN_NODE_RANDOM_VALUE, GEO_NODE_INSTANCE_ON_POINTS)) { | if (ELEM(bnode.type, FN_NODE_RANDOM_VALUE, GEO_NODE_INSTANCE_ON_POINTS)) { | ||||
| new (r_value) Field<int>(std::make_shared<bke::IDAttributeFieldInput>()); | new (r_value) ValueOrField<int>(Field<int>(std::make_shared<bke::IDAttributeFieldInput>())); | ||||
| return true; | return true; | ||||
| } | } | ||||
| new (r_value) Field<int>(std::make_shared<fn::IndexFieldInput>()); | new (r_value) ValueOrField<int>(Field<int>(std::make_shared<fn::IndexFieldInput>())); | ||||
| return true; | return true; | ||||
| } | } | ||||
| } | } | ||||
| return false; | return false; | ||||
| } | } | ||||
| static void get_socket_value(const SocketRef &socket, void *r_value) | static void get_socket_value(const SocketRef &socket, void *r_value) | ||||
| { | { | ||||
| if (get_implicit_socket_input(socket, r_value)) { | if (get_implicit_socket_input(socket, r_value)) { | ||||
| return; | return; | ||||
| } | } | ||||
| const bNodeSocketType *typeinfo = socket.typeinfo(); | const bNodeSocketType *typeinfo = socket.typeinfo(); | ||||
| typeinfo->get_geometry_nodes_cpp_value(*socket.bsocket(), r_value); | typeinfo->get_geometry_nodes_cpp_value(*socket.bsocket(), r_value); | ||||
| } | } | ||||
| static bool node_supports_laziness(const DNode node) | static bool node_supports_laziness(const DNode node) | ||||
| { | { | ||||
| return node->typeinfo()->geometry_node_execute_supports_laziness; | return node->typeinfo()->geometry_node_execute_supports_laziness; | ||||
| } | } | ||||
| struct NodeTaskRunState { | |||||
| /** The node that should be run on the same thread after the current node finished. */ | |||||
| DNode next_node_to_run; | |||||
| }; | |||||
| /** Implements the callbacks that might be called when a node is executed. */ | /** Implements the callbacks that might be called when a node is executed. */ | ||||
| class NodeParamsProvider : public nodes::GeoNodeExecParamsProvider { | class NodeParamsProvider : public nodes::GeoNodeExecParamsProvider { | ||||
| private: | private: | ||||
| GeometryNodesEvaluator &evaluator_; | GeometryNodesEvaluator &evaluator_; | ||||
| NodeState &node_state_; | NodeState &node_state_; | ||||
| NodeTaskRunState *run_state_; | |||||
| public: | public: | ||||
| NodeParamsProvider(GeometryNodesEvaluator &evaluator, DNode dnode, NodeState &node_state); | NodeParamsProvider(GeometryNodesEvaluator &evaluator, DNode dnode, NodeState &node_state, NodeTaskRunState *run_state); | ||||
| bool can_get_input(StringRef identifier) const override; | bool can_get_input(StringRef identifier) const override; | ||||
| bool can_set_output(StringRef identifier) const override; | bool can_set_output(StringRef identifier) const override; | ||||
| GMutablePointer extract_input(StringRef identifier) override; | GMutablePointer extract_input(StringRef identifier) override; | ||||
| Vector<GMutablePointer> extract_multi_input(StringRef identifier) override; | Vector<GMutablePointer> extract_multi_input(StringRef identifier) override; | ||||
| GPointer get_input(StringRef identifier) const override; | GPointer get_input(StringRef identifier) const override; | ||||
| GMutablePointer alloc_output_value(const CPPType &type) override; | GMutablePointer alloc_output_value(const CPPType &type) override; | ||||
| void set_output(StringRef identifier, GMutablePointer value) override; | void set_output(StringRef identifier, GMutablePointer value) override; | ||||
(235 lines not shown)
| for (auto &&item : params_.input_values.items()) { | ||||
| const DNode node = socket.node(); | const DNode node = socket.node(); | ||||
| if (!node_states_.contains_as(node)) { | if (!node_states_.contains_as(node)) { | ||||
| /* The socket is not connected to any output. */ | /* The socket is not connected to any output. */ | ||||
| this->log_socket_value({socket}, value); | this->log_socket_value({socket}, value); | ||||
| value.destruct(); | value.destruct(); | ||||
| continue; | continue; | ||||
| } | } | ||||
| this->forward_output(socket, value); | this->forward_output(socket, value, nullptr); | ||||
| } | } | ||||
| } | } | ||||
| void schedule_initial_nodes() | void schedule_initial_nodes() | ||||
| { | { | ||||
| for (const DInputSocket &socket : params_.output_sockets) { | for (const DInputSocket &socket : params_.output_sockets) { | ||||
| const DNode node = socket.node(); | const DNode node = socket.node(); | ||||
| NodeState &node_state = this->get_node_state(node); | NodeState &node_state = this->get_node_state(node); | ||||
| this->with_locked_node(node, node_state, [&](LockedNode &locked_node) { | this->with_locked_node(node, node_state, nullptr, [&](LockedNode &locked_node) { | ||||
| /* Setting an input as required will schedule any linked node. */ | /* Setting an input as required will schedule any linked node. */ | ||||
| this->set_input_required(locked_node, socket); | this->set_input_required(locked_node, socket); | ||||
| }); | }); | ||||
| } | } | ||||
| for (const DSocket socket : params_.force_compute_sockets) { | for (const DSocket socket : params_.force_compute_sockets) { | ||||
| const DNode node = socket.node(); | const DNode node = socket.node(); | ||||
| NodeState &node_state = this->get_node_state(node); | NodeState &node_state = this->get_node_state(node); | ||||
| this->with_locked_node(node, node_state, [&](LockedNode &locked_node) { | this->with_locked_node(node, node_state, nullptr, [&](LockedNode &locked_node) { | ||||
| if (socket->is_input()) { | if (socket->is_input()) { | ||||
| this->set_input_required(locked_node, DInputSocket(socket)); | this->set_input_required(locked_node, DInputSocket(socket)); | ||||
| } | } | ||||
| else { | else { | ||||
| OutputState &output_state = node_state.outputs[socket->index()]; | OutputState &output_state = node_state.outputs[socket->index()]; | ||||
| output_state.output_usage = ValueUsage::Required; | output_state.output_usage = ValueUsage::Required; | ||||
| this->schedule_node(locked_node); | this->schedule_node(locked_node); | ||||
| } | } | ||||
(28 lines not shown)
| switch (locked_node.node_state.schedule_state) { | ||||
| } | } | ||||
| } | } | ||||
| } | } | ||||
| static void run_node_from_task_pool(TaskPool *task_pool, void *task_data) | static void run_node_from_task_pool(TaskPool *task_pool, void *task_data) | ||||
| { | { | ||||
| void *user_data = BLI_task_pool_user_data(task_pool); | void *user_data = BLI_task_pool_user_data(task_pool); | ||||
| GeometryNodesEvaluator &evaluator = *(GeometryNodesEvaluator *)user_data; | GeometryNodesEvaluator &evaluator = *(GeometryNodesEvaluator *)user_data; | ||||
| const NodeWithState *node_with_state = (const NodeWithState *)task_data; | const NodeWithState *root_node_with_state = (const NodeWithState *)task_data; | ||||
| evaluator.node_task_run(node_with_state->node, *node_with_state->state); | /* First, the node provided by the task pool is executed. During the execution other nodes | ||||
| * might be scheduled. One of those nodes is not added to the task pool but is executed in the | |||||
| * loop below directly. This has two main benefits: | |||||
| * - Fewer round trips through the task pool which add threading overhead. | |||||
| * - Helps with cpu cache efficiency, because a thread is more likely to process data that it | |||||
| * has processed shortly before. | |||||
| */ | |||||
| DNode next_node_to_run = root_node_with_state->node; | |||||
| while (next_node_to_run) { | |||||
| NodeTaskRunState run_state; | |||||
| evaluator.node_task_run(next_node_to_run, &run_state); | |||||
| next_node_to_run = run_state.next_node_to_run; | |||||
| } | |||||
| } | } | ||||
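
Aside, not part of this changeset: the comment in run_node_from_task_pool above describes a general "same-thread continuation" pattern, where a worker keeps running follow-up work directly instead of pushing every item back through the task pool. A minimal, self-contained sketch of that loop (hypothetical Task type, not Blender's API):

  #include <functional>

  /* Hypothetical stand-in for a schedulable unit of work. */
  struct Task {
    /* Returns a follow-up task to run on the same thread, or nullptr when done. */
    std::function<Task *()> run;
  };

  static void worker_entry(Task *root_task)
  {
    for (Task *task = root_task; task != nullptr;) {
      /* Running the follow-up directly avoids a round trip through the pool and
       * keeps recently touched data warm in this thread's caches. */
      task = task->run();
    }
  }
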
| void node_task_run(const DNode node, NodeState &node_state) | void node_task_run(const DNode node, NodeTaskRunState *run_state) | ||||
| { | { | ||||
| /* These nodes are sometimes scheduled. We could also check for them in other places, but | /* These nodes are sometimes scheduled. We could also check for them in other places, but | ||||
| * it's the easiest to do it here. */ | * it's the easiest to do it here. */ | ||||
| if (node->is_group_input_node() || node->is_group_output_node()) { | if (node->is_group_input_node() || node->is_group_output_node()) { | ||||
| return; | return; | ||||
| } | } | ||||
| const bool do_execute_node = this->node_task_preprocessing(node, node_state); | NodeState &node_state = *node_states_.lookup_key_as(node).state; | ||||
| const bool do_execute_node = this->node_task_preprocessing(node, node_state, run_state); | |||||
| /* Only execute the node if all prerequisites are met. There has to be an output that is | /* Only execute the node if all prerequisites are met. There has to be an output that is | ||||
| * required and all required inputs have to be provided already. */ | * required and all required inputs have to be provided already. */ | ||||
| if (do_execute_node) { | if (do_execute_node) { | ||||
| this->execute_node(node, node_state); | this->execute_node(node, node_state, run_state); | ||||
| } | } | ||||
| this->node_task_postprocessing(node, node_state, do_execute_node); | this->node_task_postprocessing(node, node_state, do_execute_node, run_state); | ||||
| } | } | ||||
| bool node_task_preprocessing(const DNode node, NodeState &node_state) | bool node_task_preprocessing(const DNode node, NodeState &node_state, NodeTaskRunState *run_state) | ||||
| { | { | ||||
| bool do_execute_node = false; | bool do_execute_node = false; | ||||
| this->with_locked_node(node, node_state, [&](LockedNode &locked_node) { | this->with_locked_node(node, node_state, run_state, [&](LockedNode &locked_node) { | ||||
| BLI_assert(node_state.schedule_state == NodeScheduleState::Scheduled); | BLI_assert(node_state.schedule_state == NodeScheduleState::Scheduled); | ||||
| node_state.schedule_state = NodeScheduleState::Running; | node_state.schedule_state = NodeScheduleState::Running; | ||||
| /* Early return if the node has finished already. */ | /* Early return if the node has finished already. */ | ||||
| if (locked_node.node_state.node_has_finished) { | if (locked_node.node_state.node_has_finished) { | ||||
| return; | return; | ||||
| } | } | ||||
| /* Prepare outputs and check if actually any new outputs have to be computed. */ | /* Prepare outputs and check if actually any new outputs have to be computed. */ | ||||
(142 lines not shown)
| bool prepare_node_inputs_for_execution(LockedNode &locked_node) | ||||
| /* All required inputs have been provided. */ | /* All required inputs have been provided. */ | ||||
| return true; | return true; | ||||
| } | } | ||||
| /** | /** | ||||
| * Actually execute the node. All the required inputs are available and at least one output is | * Actually execute the node. All the required inputs are available and at least one output is | ||||
| * required. | * required. | ||||
| */ | */ | ||||
| void execute_node(const DNode node, NodeState &node_state) | void execute_node(const DNode node, NodeState &node_state, NodeTaskRunState *run_state) | ||||
| { | { | ||||
| const bNode &bnode = *node->bnode(); | const bNode &bnode = *node->bnode(); | ||||
| if (node_state.has_been_executed) { | if (node_state.has_been_executed) { | ||||
| if (!node_supports_laziness(node)) { | if (!node_supports_laziness(node)) { | ||||
| /* Nodes that don't support laziness must not be executed more than once. */ | /* Nodes that don't support laziness must not be executed more than once. */ | ||||
| BLI_assert_unreachable(); | BLI_assert_unreachable(); | ||||
| } | } | ||||
| } | } | ||||
| node_state.has_been_executed = true; | node_state.has_been_executed = true; | ||||
| /* Use the geometry node execute callback if it exists. */ | /* Use the geometry node execute callback if it exists. */ | ||||
| if (bnode.typeinfo->geometry_node_execute != nullptr) { | if (bnode.typeinfo->geometry_node_execute != nullptr) { | ||||
| this->execute_geometry_node(node, node_state); | this->execute_geometry_node(node, node_state, run_state); | ||||
| return; | return; | ||||
| } | } | ||||
| /* Use the multi-function implementation if it exists. */ | /* Use the multi-function implementation if it exists. */ | ||||
| const nodes::NodeMultiFunctions::Item &fn_item = params_.mf_by_node->try_get(node); | const nodes::NodeMultiFunctions::Item &fn_item = params_.mf_by_node->try_get(node); | ||||
| if (fn_item.fn != nullptr) { | if (fn_item.fn != nullptr) { | ||||
| this->execute_multi_function_node(node, fn_item, node_state); | this->execute_multi_function_node(node, fn_item, node_state, run_state); | ||||
| return; | return; | ||||
| } | } | ||||
| this->execute_unknown_node(node, node_state); | this->execute_unknown_node(node, node_state, run_state); | ||||
| } | } | ||||
| void execute_geometry_node(const DNode node, NodeState &node_state) | void execute_geometry_node(const DNode node, NodeState &node_state, NodeTaskRunState *run_state) | ||||
| { | { | ||||
| const bNode &bnode = *node->bnode(); | const bNode &bnode = *node->bnode(); | ||||
| NodeParamsProvider params_provider{*this, node, node_state}; | NodeParamsProvider params_provider{*this, node, node_state, run_state}; | ||||
| GeoNodeExecParams params{params_provider}; | GeoNodeExecParams params{params_provider}; | ||||
| if (node->idname().find("Legacy") != StringRef::not_found) { | if (node->idname().find("Legacy") != StringRef::not_found) { | ||||
| params.error_message_add(geo_log::NodeWarningType::Legacy, | params.error_message_add(geo_log::NodeWarningType::Legacy, | ||||
| TIP_("Legacy node will be removed before Blender 4.0")); | TIP_("Legacy node will be removed before Blender 4.0")); | ||||
| } | } | ||||
| using Clock = std::chrono::steady_clock; | |||||
| Clock::time_point begin = Clock::now(); | |||||
| bnode.typeinfo->geometry_node_execute(params); | bnode.typeinfo->geometry_node_execute(params); | ||||
| Clock::time_point end = Clock::now(); | |||||
| const std::chrono::microseconds duration = | |||||
| std::chrono::duration_cast<std::chrono::microseconds>(end - begin); | |||||
| if (params_.geo_logger != nullptr) { | |||||
| params_.geo_logger->local().log_execution_time(node, duration); | |||||
| } | |||||
| } | } | ||||
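
Aside, not part of this changeset: the timing added to execute_geometry_node follows the standard std::chrono::steady_clock pattern. A self-contained sketch of measuring one call in microseconds (measure_microseconds and the measured lambda are illustrative names, not Blender API):

  #include <chrono>
  #include <cstdint>

  template<typename Fn> static int64_t measure_microseconds(const Fn &fn)
  {
    using Clock = std::chrono::steady_clock;
    const Clock::time_point begin = Clock::now();
    fn();
    const Clock::time_point end = Clock::now();
    /* steady_clock is monotonic, so the difference is safe against system clock changes. */
    return std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count();
  }

  /* Usage: const int64_t duration_us = measure_microseconds([&]() { some_work(); }); */
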
| void execute_multi_function_node(const DNode node, | void execute_multi_function_node(const DNode node, | ||||
| const nodes::NodeMultiFunctions::Item &fn_item, | const nodes::NodeMultiFunctions::Item &fn_item, | ||||
| NodeState &node_state) | NodeState &node_state, | ||||
| NodeTaskRunState *run_state) | |||||
| { | { | ||||
| if (node->idname().find("Legacy") != StringRef::not_found) { | if (node->idname().find("Legacy") != StringRef::not_found) { | ||||
| /* Create geometry nodes params just for creating an error message. */ | /* Create geometry nodes params just for creating an error message. */ | ||||
| NodeParamsProvider params_provider{*this, node, node_state}; | NodeParamsProvider params_provider{*this, node, node_state, run_state}; | ||||
| GeoNodeExecParams params{params_provider}; | GeoNodeExecParams params{params_provider}; | ||||
| params.error_message_add(geo_log::NodeWarningType::Legacy, | params.error_message_add(geo_log::NodeWarningType::Legacy, | ||||
| TIP_("Legacy node will be removed before Blender 4.0")); | TIP_("Legacy node will be removed before Blender 4.0")); | ||||
| } | } | ||||
| LinearAllocator<> &allocator = local_allocators_.local(); | LinearAllocator<> &allocator = local_allocators_.local(); | ||||
| /* Prepare the inputs for the multi function. */ | bool any_input_is_field = false; | ||||
| Vector<GField> input_fields; | Vector<const void *, 16> input_values; | ||||
| Vector<const ValueOrFieldCPPType *, 16> input_types; | |||||
| for (const int i : node->inputs().index_range()) { | for (const int i : node->inputs().index_range()) { | ||||
| const InputSocketRef &socket_ref = node->input(i); | const InputSocketRef &socket_ref = node->input(i); | ||||
| if (!socket_ref.is_available()) { | if (!socket_ref.is_available()) { | ||||
| continue; | continue; | ||||
| } | } | ||||
| BLI_assert(!socket_ref.is_multi_input_socket()); | BLI_assert(!socket_ref.is_multi_input_socket()); | ||||
| InputState &input_state = node_state.inputs[i]; | InputState &input_state = node_state.inputs[i]; | ||||
| BLI_assert(input_state.was_ready_for_execution); | BLI_assert(input_state.was_ready_for_execution); | ||||
| SingleInputValue &single_value = *input_state.value.single; | SingleInputValue &single_value = *input_state.value.single; | ||||
| BLI_assert(single_value.value != nullptr); | BLI_assert(single_value.value != nullptr); | ||||
| input_fields.append(std::move(*(GField *)single_value.value)); | const ValueOrFieldCPPType &field_cpp_type = static_cast<const ValueOrFieldCPPType &>( | ||||
| *input_state.type); | |||||
| input_values.append(single_value.value); | |||||
| input_types.append(&field_cpp_type); | |||||
| if (field_cpp_type.is_field(single_value.value)) { | |||||
| any_input_is_field = true; | |||||
| } | |||||
| } | |||||
| if (any_input_is_field) { | |||||
| this->execute_multi_function_node__field( | |||||
| node, fn_item, node_state, allocator, input_values, input_types, run_state); | |||||
| } | |||||
| else { | |||||
| this->execute_multi_function_node__value( | |||||
| node, *fn_item.fn, node_state, allocator, input_values, input_types, run_state); | |||||
| } | |||||
| } | |||||
| void execute_multi_function_node__field(const DNode node, | |||||
| const nodes::NodeMultiFunctions::Item &fn_item, | |||||
| NodeState &node_state, | |||||
| LinearAllocator<> &allocator, | |||||
| Span<const void *> input_values, | |||||
| Span<const ValueOrFieldCPPType *> input_types, | |||||
| NodeTaskRunState *run_state) | |||||
| { | |||||
| Vector<GField> input_fields; | |||||
| for (const int i : input_values.index_range()) { | |||||
| const void *input_value_or_field = input_values[i]; | |||||
| const ValueOrFieldCPPType &field_cpp_type = *input_types[i]; | |||||
| input_fields.append(field_cpp_type.as_field(input_value_or_field)); | |||||
| } | } | ||||
| std::shared_ptr<fn::FieldOperation> operation; | std::shared_ptr<fn::FieldOperation> operation; | ||||
| if (fn_item.owned_fn) { | if (fn_item.owned_fn) { | ||||
| operation = std::make_shared<fn::FieldOperation>(fn_item.owned_fn, std::move(input_fields)); | operation = std::make_shared<fn::FieldOperation>(fn_item.owned_fn, std::move(input_fields)); | ||||
| } | } | ||||
| else { | else { | ||||
| operation = std::make_shared<fn::FieldOperation>(*fn_item.fn, std::move(input_fields)); | operation = std::make_shared<fn::FieldOperation>(*fn_item.fn, std::move(input_fields)); | ||||
| } | } | ||||
| /* Forward outputs. */ | |||||
| int output_index = 0; | int output_index = 0; | ||||
| for (const int i : node->outputs().index_range()) { | for (const int i : node->outputs().index_range()) { | ||||
| const OutputSocketRef &socket_ref = node->output(i); | const OutputSocketRef &socket_ref = node->output(i); | ||||
| if (!socket_ref.is_available()) { | if (!socket_ref.is_available()) { | ||||
| continue; | continue; | ||||
| } | } | ||||
| OutputState &output_state = node_state.outputs[i]; | OutputState &output_state = node_state.outputs[i]; | ||||
| const DOutputSocket socket{node.context(), &socket_ref}; | const DOutputSocket socket{node.context(), &socket_ref}; | ||||
| const CPPType *cpp_type = get_socket_cpp_type(socket_ref); | const ValueOrFieldCPPType *cpp_type = static_cast<const ValueOrFieldCPPType *>(get_socket_cpp_type(socket_ref)); | ||||
| GField new_field{operation, output_index}; | GField new_field{operation, output_index}; | ||||
| new_field = fn::make_field_constant_if_possible(std::move(new_field)); | void *buffer = allocator.allocate(cpp_type->size(), cpp_type->alignment()); | ||||
| GField &field_to_forward = *allocator.construct<GField>(std::move(new_field)).release(); | cpp_type->construct_from_field(buffer, std::move(new_field)); | ||||
| this->forward_output(socket, {cpp_type, &field_to_forward}); | this->forward_output(socket, {cpp_type, buffer}, run_state); | ||||
| output_state.has_been_computed = true; | output_state.has_been_computed = true; | ||||
| output_index++; | output_index++; | ||||
| } | } | ||||
| } | } | ||||
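
Aside, not part of this changeset: the branch above treats a socket payload as either a concrete value or a deferred field. As a rough standalone analogy (not Blender's actual ValueOrFieldCPPType API), the distinction could be modeled like this:

  #include <functional>
  #include <variant>

  /* Hypothetical stand-in: a payload is either a plain value or a lazily evaluated
   * "field", here simplified to a function of an element index. */
  template<typename T> struct ValueOrFieldSketch {
    std::variant<T, std::function<T(int)>> data;

    bool is_field() const
    {
      return std::holds_alternative<std::function<T(int)>>(data);
    }
    T evaluate(const int index) const
    {
      return this->is_field() ? std::get<std::function<T(int)>>(data)(index) : std::get<T>(data);
    }
  };
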
| void execute_unknown_node(const DNode node, NodeState &node_state) | void execute_multi_function_node__value(const DNode node, | ||||
| const MultiFunction &fn, | |||||
| NodeState &node_state, | |||||
| LinearAllocator<> &allocator, | |||||
| Span<const void *> input_values, | |||||
| Span<const ValueOrFieldCPPType *> input_types, | |||||
| NodeTaskRunState *run_state) | |||||
| { | |||||
| MFParamsBuilder params{fn, 1}; | |||||
| for (const int i : input_values.index_range()) { | |||||
| const void *input_value_or_field = input_values[i]; | |||||
| const ValueOrFieldCPPType &field_cpp_type = *input_types[i]; | |||||
| const CPPType &base_type = field_cpp_type.base_type(); | |||||
| const void *input_value = field_cpp_type.get_value_ptr(input_value_or_field); | |||||
| params.add_readonly_single_input(GVArray::ForSingleRef(base_type, 1, input_value)); | |||||
| } | |||||
| Vector<GMutablePointer, 16> output_buffers; | |||||
| for (const int i : node->outputs().index_range()) { | |||||
| const DOutputSocket socket = node.output(i); | |||||
| if (!socket->is_available()) { | |||||
| output_buffers.append({}); | |||||
| continue; | |||||
| } | |||||
| const ValueOrFieldCPPType *value_or_field_type = static_cast<const ValueOrFieldCPPType *>( | |||||
| get_socket_cpp_type(socket)); | |||||
| const CPPType &base_type = value_or_field_type->base_type(); | |||||
| void *value_or_field_buffer = allocator.allocate(value_or_field_type->size(), | |||||
| value_or_field_type->alignment()); | |||||
| value_or_field_type->default_construct(value_or_field_buffer); | |||||
| void *value_buffer = value_or_field_type->get_value_ptr(value_or_field_buffer); | |||||
| base_type.destruct(value_buffer); | |||||
| params.add_uninitialized_single_output(GMutableSpan{base_type, value_buffer, 1}); | |||||
| output_buffers.append({value_or_field_type, value_or_field_buffer}); | |||||
| } | |||||
| MFContextBuilder context; | |||||
| fn.call(IndexRange(1), params, context); | |||||
| for (const int i : output_buffers.index_range()) { | |||||
| GMutablePointer buffer = output_buffers[i]; | |||||
| if (buffer.get() == nullptr) { | |||||
| continue; | |||||
| } | |||||
| const DOutputSocket socket = node.output(i); | |||||
| this->forward_output(socket, buffer, run_state); | |||||
| OutputState &output_state = node_state.outputs[i]; | |||||
| output_state.has_been_computed = true; | |||||
| } | |||||
| } | |||||
| void execute_unknown_node(const DNode node, NodeState &node_state, NodeTaskRunState *run_state) | |||||
| { | { | ||||
| LinearAllocator<> &allocator = local_allocators_.local(); | LinearAllocator<> &allocator = local_allocators_.local(); | ||||
| for (const OutputSocketRef *socket : node->outputs()) { | for (const OutputSocketRef *socket : node->outputs()) { | ||||
| if (!socket->is_available()) { | if (!socket->is_available()) { | ||||
| continue; | continue; | ||||
| } | } | ||||
| const CPPType *type = get_socket_cpp_type(*socket); | const CPPType *type = get_socket_cpp_type(*socket); | ||||
| if (type == nullptr) { | if (type == nullptr) { | ||||
| continue; | continue; | ||||
| } | } | ||||
| /* Just forward the default value of the type as a fallback. That's typically better than | /* Just forward the default value of the type as a fallback. That's typically better than | ||||
| * crashing or doing nothing. */ | * crashing or doing nothing. */ | ||||
| OutputState &output_state = node_state.outputs[socket->index()]; | OutputState &output_state = node_state.outputs[socket->index()]; | ||||
| output_state.has_been_computed = true; | output_state.has_been_computed = true; | ||||
| void *buffer = allocator.allocate(type->size(), type->alignment()); | void *buffer = allocator.allocate(type->size(), type->alignment()); | ||||
| this->construct_default_value(*type, buffer); | this->construct_default_value(*type, buffer); | ||||
| this->forward_output({node.context(), socket}, {*type, buffer}); | this->forward_output({node.context(), socket}, {*type, buffer}, run_state); | ||||
| } | } | ||||
| } | } | ||||
| void node_task_postprocessing(const DNode node, NodeState &node_state, bool was_executed) | void node_task_postprocessing(const DNode node, NodeState &node_state, bool was_executed, NodeTaskRunState *run_state) | ||||
| { | { | ||||
| this->with_locked_node(node, node_state, [&](LockedNode &locked_node) { | this->with_locked_node(node, node_state, run_state, [&](LockedNode &locked_node) { | ||||
| const bool node_has_finished = this->finish_node_if_possible(locked_node); | const bool node_has_finished = this->finish_node_if_possible(locked_node); | ||||
| const bool reschedule_requested = node_state.schedule_state == | const bool reschedule_requested = node_state.schedule_state == | ||||
| NodeScheduleState::RunningAndRescheduled; | NodeScheduleState::RunningAndRescheduled; | ||||
| node_state.schedule_state = NodeScheduleState::NotScheduled; | node_state.schedule_state = NodeScheduleState::NotScheduled; | ||||
| if (reschedule_requested && !node_has_finished) { | if (reschedule_requested && !node_has_finished) { | ||||
| /* Either the node rescheduled itself or another node tried to schedule it while it ran. */ | /* Either the node rescheduled itself or another node tried to schedule it while it ran. */ | ||||
| this->schedule_node(locked_node); | this->schedule_node(locked_node); | ||||
| } | } | ||||
(64 lines not shown)
| for (const DInputSocket &socket : params_.output_sockets) { | ||||
| type.move_construct(value, buffer); | type.move_construct(value, buffer); | ||||
| params_.r_output_values.append({type, buffer}); | params_.r_output_values.append({type, buffer}); | ||||
| } | } | ||||
| } | } | ||||
| /** | /** | ||||
| * Load the required input from the socket or trigger nodes to the left to compute the value. | * Load the required input from the socket or trigger nodes to the left to compute the value. | ||||
| * When this function is called, the node will always be executed again eventually (either | * \return True when the node will be triggered by another node again when the value is computed. | ||||
| * immediately, or when all required inputs have been computed by other nodes). | |||||
| */ | */ | ||||
| void set_input_required(LockedNode &locked_node, const DInputSocket input_socket) | bool set_input_required(LockedNode &locked_node, const DInputSocket input_socket) | ||||
| { | { | ||||
| BLI_assert(locked_node.node == input_socket.node()); | BLI_assert(locked_node.node == input_socket.node()); | ||||
| InputState &input_state = locked_node.node_state.inputs[input_socket->index()]; | InputState &input_state = locked_node.node_state.inputs[input_socket->index()]; | ||||
| /* Value set as unused cannot become used again. */ | /* Value set as unused cannot become used again. */ | ||||
| BLI_assert(input_state.usage != ValueUsage::Unused); | BLI_assert(input_state.usage != ValueUsage::Unused); | ||||
| if (input_state.usage == ValueUsage::Required) { | if (input_state.was_ready_for_execution) { | ||||
| /* The value is already required, but the node might expect to be evaluated again. */ | return false; | ||||
| this->schedule_node(locked_node); | |||||
| /* Returning here also ensure that the code below is executed at most once per input. */ | |||||
| return; | |||||
| } | } | ||||
| input_state.usage = ValueUsage::Required; | |||||
| if (input_state.was_ready_for_execution) { | if (input_state.usage == ValueUsage::Required) { | ||||
| /* The value was already ready, but the node might expect to be evaluated again. */ | /* If the input was not ready for execution but is required, the node will be triggered again | ||||
| this->schedule_node(locked_node); | * once the input has been computed. */ | ||||
| return; | return true; | ||||
| } | } | ||||
| input_state.usage = ValueUsage::Required; | |||||
| /* Count how many values still have to be added to this input until it is "complete". */ | /* Count how many values still have to be added to this input until it is "complete". */ | ||||
| int missing_values = 0; | int missing_values = 0; | ||||
| if (input_socket->is_multi_input_socket()) { | if (input_socket->is_multi_input_socket()) { | ||||
| MultiInputValue &multi_value = *input_state.value.multi; | MultiInputValue &multi_value = *input_state.value.multi; | ||||
| missing_values = multi_value.expected_size - multi_value.items.size(); | missing_values = multi_value.expected_size - multi_value.items.size(); | ||||
| } | } | ||||
| else { | else { | ||||
| SingleInputValue &single_value = *input_state.value.single; | SingleInputValue &single_value = *input_state.value.single; | ||||
| if (single_value.value == nullptr) { | if (single_value.value == nullptr) { | ||||
| missing_values = 1; | missing_values = 1; | ||||
| } | } | ||||
| } | } | ||||
| if (missing_values == 0) { | if (missing_values == 0) { | ||||
| /* The input is fully available already, but the node might expect to be evaluated again. */ | return false; | ||||
| this->schedule_node(locked_node); | |||||
| return; | |||||
| } | } | ||||
| /* Increase the total number of missing required inputs. This ensures that the node will be | /* Increase the total number of missing required inputs. This ensures that the node will be | ||||
| * scheduled correctly when all inputs have been provided. */ | * scheduled correctly when all inputs have been provided. */ | ||||
| locked_node.node_state.missing_required_inputs += missing_values; | locked_node.node_state.missing_required_inputs += missing_values; | ||||
| /* Get all origin sockets, because we have to tag those as required as well. */ | /* Get all origin sockets, because we have to tag those as required as well. */ | ||||
| Vector<DSocket> origin_sockets; | Vector<DSocket> origin_sockets; | ||||
| input_socket.foreach_origin_socket( | input_socket.foreach_origin_socket( | ||||
| [&](const DSocket origin_socket) { origin_sockets.append(origin_socket); }); | [&](const DSocket origin_socket) { origin_sockets.append(origin_socket); }); | ||||
| if (origin_sockets.is_empty()) { | if (origin_sockets.is_empty()) { | ||||
| /* If there are no origin sockets, just load the value from the socket directly. */ | /* If there are no origin sockets, just load the value from the socket directly. */ | ||||
| this->load_unlinked_input_value(locked_node, input_socket, input_state, input_socket); | this->load_unlinked_input_value(locked_node, input_socket, input_state, input_socket); | ||||
| locked_node.node_state.missing_required_inputs -= 1; | locked_node.node_state.missing_required_inputs -= 1; | ||||
| this->schedule_node(locked_node); | return false; | ||||
| return; | |||||
| } | } | ||||
| bool will_be_triggered_by_other_node = false; | bool requested_from_other_node = false; | ||||
| for (const DSocket &origin_socket : origin_sockets) { | for (const DSocket &origin_socket : origin_sockets) { | ||||
| if (origin_socket->is_input()) { | if (origin_socket->is_input()) { | ||||
| /* Load the value directly from the origin socket. In most cases this is an unlinked | /* Load the value directly from the origin socket. In most cases this is an unlinked | ||||
| * group input. */ | * group input. */ | ||||
| this->load_unlinked_input_value(locked_node, input_socket, input_state, origin_socket); | this->load_unlinked_input_value(locked_node, input_socket, input_state, origin_socket); | ||||
| locked_node.node_state.missing_required_inputs -= 1; | locked_node.node_state.missing_required_inputs -= 1; | ||||
| this->schedule_node(locked_node); | |||||
| } | } | ||||
| else { | else { | ||||
| /* The value has not been computed yet, so when it will be forwarded by another node, this | /* The value has not been computed yet, so when it will be forwarded by another node, this | ||||
| * node will be triggered. */ | * node will be triggered. */ | ||||
| will_be_triggered_by_other_node = true; | requested_from_other_node = true; | ||||
| locked_node.delayed_required_outputs.append(DOutputSocket(origin_socket)); | locked_node.delayed_required_outputs.append(DOutputSocket(origin_socket)); | ||||
| } | } | ||||
| } | } | ||||
| /* If this node will be triggered by another node, we don't have to schedule it now. */ | /* If this node will be triggered by another node, we don't have to schedule it now. */ | ||||
| if (!will_be_triggered_by_other_node) { | if (requested_from_other_node) { | ||||
| this->schedule_node(locked_node); | return true; | ||||
| } | } | ||||
| return false; | |||||
| } | } | ||||
| void set_input_unused(LockedNode &locked_node, const DInputSocket socket) | void set_input_unused(LockedNode &locked_node, const DInputSocket socket) | ||||
| { | { | ||||
| InputState &input_state = locked_node.node_state.inputs[socket->index()]; | InputState &input_state = locked_node.node_state.inputs[socket->index()]; | ||||
| /* A required socket cannot become unused. */ | /* A required socket cannot become unused. */ | ||||
| BLI_assert(input_state.usage != ValueUsage::Required); | BLI_assert(input_state.usage != ValueUsage::Required); | ||||
(19 lines not shown)
| socket.foreach_origin_socket([&](const DSocket origin_socket) { | ||||
| * notify. */ | * notify. */ | ||||
| return; | return; | ||||
| } | } | ||||
| /* Delay notification of the other node until this node is not locked anymore. */ | /* Delay notification of the other node until this node is not locked anymore. */ | ||||
| locked_node.delayed_unused_outputs.append(DOutputSocket(origin_socket)); | locked_node.delayed_unused_outputs.append(DOutputSocket(origin_socket)); | ||||
| }); | }); | ||||
| } | } | ||||
| void send_output_required_notification(const DOutputSocket socket) | void send_output_required_notification(const DOutputSocket socket, NodeTaskRunState *run_state) | ||||
| { | { | ||||
| const DNode node = socket.node(); | const DNode node = socket.node(); | ||||
| NodeState &node_state = this->get_node_state(node); | NodeState &node_state = this->get_node_state(node); | ||||
| OutputState &output_state = node_state.outputs[socket->index()]; | OutputState &output_state = node_state.outputs[socket->index()]; | ||||
| this->with_locked_node(node, node_state, [&](LockedNode &locked_node) { | this->with_locked_node(node, node_state, run_state, [&](LockedNode &locked_node) { | ||||
| if (output_state.output_usage == ValueUsage::Required) { | if (output_state.output_usage == ValueUsage::Required) { | ||||
| /* Output is marked as required already. So the node is scheduled already. */ | /* Output is marked as required already. So the node is scheduled already. */ | ||||
| return; | return; | ||||
| } | } | ||||
| /* The origin node needs to be scheduled so that it provides the requested input | /* The origin node needs to be scheduled so that it provides the requested input | ||||
| * eventually. */ | * eventually. */ | ||||
| output_state.output_usage = ValueUsage::Required; | output_state.output_usage = ValueUsage::Required; | ||||
| this->schedule_node(locked_node); | this->schedule_node(locked_node); | ||||
| }); | }); | ||||
| } | } | ||||
| void send_output_unused_notification(const DOutputSocket socket) | void send_output_unused_notification(const DOutputSocket socket, NodeTaskRunState *run_state) | ||||
| { | { | ||||
| const DNode node = socket.node(); | const DNode node = socket.node(); | ||||
| NodeState &node_state = this->get_node_state(node); | NodeState &node_state = this->get_node_state(node); | ||||
| OutputState &output_state = node_state.outputs[socket->index()]; | OutputState &output_state = node_state.outputs[socket->index()]; | ||||
| this->with_locked_node(node, node_state, [&](LockedNode &locked_node) { | this->with_locked_node(node, node_state, run_state, [&](LockedNode &locked_node) { | ||||
| output_state.potential_users -= 1; | output_state.potential_users -= 1; | ||||
| if (output_state.potential_users == 0) { | if (output_state.potential_users == 0) { | ||||
| /* The socket might be required even though the output is not used by other sockets. That | /* The socket might be required even though the output is not used by other sockets. That | ||||
| * can happen when the socket is forced to be computed. */ | * can happen when the socket is forced to be computed. */ | ||||
| if (output_state.output_usage != ValueUsage::Required) { | if (output_state.output_usage != ValueUsage::Required) { | ||||
| /* The output socket has no users anymore. */ | /* The output socket has no users anymore. */ | ||||
| output_state.output_usage = ValueUsage::Unused; | output_state.output_usage = ValueUsage::Unused; | ||||
| /* Schedule the origin node in case it wants to set its inputs as unused as well. */ | /* Schedule the origin node in case it wants to set its inputs as unused as well. */ | ||||
(10 lines not shown)
| void add_node_to_task_pool(const DNode node) | ||||
| const NodeWithState *node_with_state = node_states_.lookup_key_ptr_as(node); | const NodeWithState *node_with_state = node_states_.lookup_key_ptr_as(node); | ||||
| BLI_task_pool_push( | BLI_task_pool_push( | ||||
| task_pool_, run_node_from_task_pool, (void *)node_with_state, false, nullptr); | task_pool_, run_node_from_task_pool, (void *)node_with_state, false, nullptr); | ||||
| } | } | ||||
| /** | /** | ||||
| * Moves a newly computed value from an output socket to all the inputs that might need it. | * Moves a newly computed value from an output socket to all the inputs that might need it. | ||||
| */ | */ | ||||
| void forward_output(const DOutputSocket from_socket, GMutablePointer value_to_forward) | void forward_output(const DOutputSocket from_socket, GMutablePointer value_to_forward, NodeTaskRunState *run_state) | ||||
| { | { | ||||
| BLI_assert(value_to_forward.get() != nullptr); | BLI_assert(value_to_forward.get() != nullptr); | ||||
| LinearAllocator<> &allocator = local_allocators_.local(); | LinearAllocator<> &allocator = local_allocators_.local(); | ||||
| Vector<DSocket> log_original_value_sockets; | Vector<DSocket> log_original_value_sockets; | ||||
| Vector<DInputSocket> forward_original_value_sockets; | Vector<DInputSocket> forward_original_value_sockets; | ||||
| log_original_value_sockets.append(from_socket); | log_original_value_sockets.append(from_socket); | ||||
(36 lines not shown)
| from_socket.foreach_target_socket( | ||||
| } | } | ||||
| } | } | ||||
| if (current_value.get() == value_to_forward.get()) { | if (current_value.get() == value_to_forward.get()) { | ||||
| /* The value has not been converted, so forward the original value. */ | /* The value has not been converted, so forward the original value. */ | ||||
| forward_original_value_sockets.append(to_socket); | forward_original_value_sockets.append(to_socket); | ||||
| } | } | ||||
| else { | else { | ||||
| /* The value has been converted. */ | /* The value has been converted. */ | ||||
| this->add_value_to_input_socket(to_socket, from_socket, current_value); | this->add_value_to_input_socket(to_socket, from_socket, current_value, run_state); | ||||
| } | } | ||||
| }); | }); | ||||
| this->log_socket_value(log_original_value_sockets, value_to_forward); | this->log_socket_value(log_original_value_sockets, value_to_forward); | ||||
| this->forward_to_sockets_with_same_type( | this->forward_to_sockets_with_same_type( | ||||
| allocator, forward_original_value_sockets, value_to_forward, from_socket); | allocator, forward_original_value_sockets, value_to_forward, from_socket, run_state); | ||||
| } | } | ||||
| bool should_forward_to_socket(const DInputSocket socket) | bool should_forward_to_socket(const DInputSocket socket) | ||||
| { | { | ||||
| const DNode to_node = socket.node(); | const DNode to_node = socket.node(); | ||||
| const NodeWithState *target_node_with_state = node_states_.lookup_key_ptr_as(to_node); | const NodeWithState *target_node_with_state = node_states_.lookup_key_ptr_as(to_node); | ||||
| if (target_node_with_state == nullptr) { | if (target_node_with_state == nullptr) { | ||||
| /* If the socket belongs to a node that has no state, the entire node is not used. */ | /* If the socket belongs to a node that has no state, the entire node is not used. */ | ||||
| return false; | return false; | ||||
| } | } | ||||
| NodeState &target_node_state = *target_node_with_state->state; | NodeState &target_node_state = *target_node_with_state->state; | ||||
| InputState &target_input_state = target_node_state.inputs[socket->index()]; | InputState &target_input_state = target_node_state.inputs[socket->index()]; | ||||
| std::lock_guard lock{target_node_state.mutex}; | std::lock_guard lock{target_node_state.mutex}; | ||||
| /* Do not forward to an input socket whose value won't be used. */ | /* Do not forward to an input socket whose value won't be used. */ | ||||
| return target_input_state.usage != ValueUsage::Unused; | return target_input_state.usage != ValueUsage::Unused; | ||||
| } | } | ||||
| void forward_to_sockets_with_same_type(LinearAllocator<> &allocator, | void forward_to_sockets_with_same_type(LinearAllocator<> &allocator, | ||||
| Span<DInputSocket> to_sockets, | Span<DInputSocket> to_sockets, | ||||
| GMutablePointer value_to_forward, | GMutablePointer value_to_forward, | ||||
| const DOutputSocket from_socket) | const DOutputSocket from_socket, | ||||
| NodeTaskRunState *run_state) | |||||
| { | { | ||||
| if (to_sockets.is_empty()) { | if (to_sockets.is_empty()) { | ||||
| /* Value is not used anymore, so it can be destructed. */ | /* Value is not used anymore, so it can be destructed. */ | ||||
| value_to_forward.destruct(); | value_to_forward.destruct(); | ||||
| } | } | ||||
| else if (to_sockets.size() == 1) { | else if (to_sockets.size() == 1) { | ||||
| /* Value is only used by one input socket, no need to copy it. */ | /* Value is only used by one input socket, no need to copy it. */ | ||||
| const DInputSocket to_socket = to_sockets[0]; | const DInputSocket to_socket = to_sockets[0]; | ||||
| this->add_value_to_input_socket(to_socket, from_socket, value_to_forward); | this->add_value_to_input_socket(to_socket, from_socket, value_to_forward, run_state); | ||||
| } | } | ||||
| else { | else { | ||||
| /* Multiple inputs use the value, make a copy for every input except for one. */ | /* Multiple inputs use the value, make a copy for every input except for one. */ | ||||
| /* First make the copies, so that the next node does not start modifying the value while we | /* First make the copies, so that the next node does not start modifying the value while we | ||||
| * are still making copies. */ | * are still making copies. */ | ||||
| const CPPType &type = *value_to_forward.type(); | const CPPType &type = *value_to_forward.type(); | ||||
| for (const DInputSocket &to_socket : to_sockets.drop_front(1)) { | for (const DInputSocket &to_socket : to_sockets.drop_front(1)) { | ||||
| void *buffer = allocator.allocate(type.size(), type.alignment()); | void *buffer = allocator.allocate(type.size(), type.alignment()); | ||||
| type.copy_construct(value_to_forward.get(), buffer); | type.copy_construct(value_to_forward.get(), buffer); | ||||
| this->add_value_to_input_socket(to_socket, from_socket, {type, buffer}); | this->add_value_to_input_socket(to_socket, from_socket, {type, buffer}, run_state); | ||||
| } | } | ||||
| /* Forward the original value to one of the targets. */ | /* Forward the original value to one of the targets. */ | ||||
| const DInputSocket to_socket = to_sockets[0]; | const DInputSocket to_socket = to_sockets[0]; | ||||
| this->add_value_to_input_socket(to_socket, from_socket, value_to_forward); | this->add_value_to_input_socket(to_socket, from_socket, value_to_forward, run_state); | ||||
| } | } | ||||
| } | } | ||||
| void add_value_to_input_socket(const DInputSocket socket, | void add_value_to_input_socket(const DInputSocket socket, | ||||
| const DOutputSocket origin, | const DOutputSocket origin, | ||||
| GMutablePointer value) | GMutablePointer value, | ||||
| NodeTaskRunState *run_state) | |||||
| { | { | ||||
| BLI_assert(socket->is_available()); | BLI_assert(socket->is_available()); | ||||
| const DNode node = socket.node(); | const DNode node = socket.node(); | ||||
| NodeState &node_state = this->get_node_state(node); | NodeState &node_state = this->get_node_state(node); | ||||
| InputState &input_state = node_state.inputs[socket->index()]; | InputState &input_state = node_state.inputs[socket->index()]; | ||||
| this->with_locked_node(node, node_state, [&](LockedNode &locked_node) { | this->with_locked_node(node, node_state, run_state, [&](LockedNode &locked_node) { | ||||
| if (socket->is_multi_input_socket()) { | if (socket->is_multi_input_socket()) { | ||||
| /* Add a new value to the multi-input. */ | /* Add a new value to the multi-input. */ | ||||
| MultiInputValue &multi_value = *input_state.value.multi; | MultiInputValue &multi_value = *input_state.value.multi; | ||||
| multi_value.items.append({origin, value.get()}); | multi_value.items.append({origin, value.get()}); | ||||
| if (multi_value.expected_size == multi_value.items.size()) { | if (multi_value.expected_size == multi_value.items.size()) { | ||||
| this->log_socket_value({socket}, input_state, multi_value.items); | this->log_socket_value({socket}, input_state, multi_value.items); | ||||
| } | } | ||||
(10 lines not shown)
| this->with_locked_node(node, node_state, run_state, [&](LockedNode &locked_node) { | ||||
| if (node_state.missing_required_inputs == 0) { | if (node_state.missing_required_inputs == 0) { | ||||
| /* Schedule node if all the required inputs have been provided. */ | /* Schedule node if all the required inputs have been provided. */ | ||||
| this->schedule_node(locked_node); | this->schedule_node(locked_node); | ||||
| } | } | ||||
| } | } | ||||
| }); | }); | ||||
| } | } | ||||
| /** | |||||
| * Loads the value of a socket that is not computed by another node. Note that the socket may | |||||
| * still be linked to e.g. a Group Input node, but the socket on the outside is not connected to | |||||
| * anything. | |||||
| * | |||||
| * \param input_socket The socket of the node that wants to use the value. | |||||
| * \param origin_socket The socket that we want to load the value from. | |||||
| */ | |||||
| void load_unlinked_input_value(LockedNode &locked_node, | void load_unlinked_input_value(LockedNode &locked_node, | ||||
| const DInputSocket input_socket, | const DInputSocket input_socket, | ||||
| InputState &input_state, | InputState &input_state, | ||||
| const DSocket origin_socket) | const DSocket origin_socket) | ||||
| { | { | ||||
| /* Only takes locked node as parameter, because the node needs to be locked. */ | /* Only takes locked node as parameter, because the node needs to be locked. */ | ||||
| UNUSED_VARS(locked_node); | UNUSED_VARS(locked_node); | ||||
| GMutablePointer value = this->get_value_from_socket(origin_socket, *input_state.type); | GMutablePointer value = this->get_value_from_socket(origin_socket, *input_state.type); | ||||
| if (input_socket->is_multi_input_socket()) { | if (input_socket->is_multi_input_socket()) { | ||||
| MultiInputValue &multi_value = *input_state.value.multi; | MultiInputValue &multi_value = *input_state.value.multi; | ||||
| multi_value.items.append({origin_socket, value.get()}); | multi_value.items.append({origin_socket, value.get()}); | ||||
| if (multi_value.expected_size == multi_value.items.size()) { | if (multi_value.expected_size == multi_value.items.size()) { | ||||
| this->log_socket_value({input_socket}, input_state, multi_value.items); | this->log_socket_value({input_socket}, input_state, multi_value.items); | ||||
| } | } | ||||
| } | } | ||||
| else { | else { | ||||
| SingleInputValue &single_value = *input_state.value.single; | SingleInputValue &single_value = *input_state.value.single; | ||||
| single_value.value = value.get(); | single_value.value = value.get(); | ||||
| this->log_socket_value({input_socket}, value); | Vector<DSocket> sockets_to_log_to = {input_socket}; | ||||
| if (origin_socket != input_socket) { | |||||
| /* This might log the socket value for the #origin_socket more than once, but this is | |||||
| * handled by the logging system gracefully. */ | |||||
| sockets_to_log_to.append(origin_socket); | |||||
| } | |||||
| /* TODO: Log to the intermediate sockets between the group input and where the value is | |||||
| * actually used as well. */ | |||||
| this->log_socket_value(sockets_to_log_to, value); | |||||
| } | } | ||||
| } | } | ||||
| void destruct_input_value_if_exists(LockedNode &locked_node, const DInputSocket socket) | void destruct_input_value_if_exists(LockedNode &locked_node, const DInputSocket socket) | ||||
| { | { | ||||
| InputState &input_state = locked_node.node_state.inputs[socket->index()]; | InputState &input_state = locked_node.node_state.inputs[socket->index()]; | ||||
| if (socket->is_multi_input_socket()) { | if (socket->is_multi_input_socket()) { | ||||
| MultiInputValue &multi_value = *input_state.value.multi; | MultiInputValue &multi_value = *input_state.value.multi; | ||||
(32 lines not shown)
| void convert_value(const CPPType &from_type, | ||||
| const CPPType &to_type, | const CPPType &to_type, | ||||
| const void *from_value, | const void *from_value, | ||||
| void *to_value) | void *to_value) | ||||
| { | { | ||||
| if (from_type == to_type) { | if (from_type == to_type) { | ||||
| from_type.copy_construct(from_value, to_value); | from_type.copy_construct(from_value, to_value); | ||||
| return; | return; | ||||
| } | } | ||||
| const FieldCPPType *from_field_type = dynamic_cast<const FieldCPPType *>(&from_type); | const ValueOrFieldCPPType *from_field_type = dynamic_cast<const ValueOrFieldCPPType *>(&from_type); | ||||
| const FieldCPPType *to_field_type = dynamic_cast<const FieldCPPType *>(&to_type); | const ValueOrFieldCPPType *to_field_type = dynamic_cast<const ValueOrFieldCPPType *>(&to_type); | ||||
| if (from_field_type != nullptr && to_field_type != nullptr) { | if (from_field_type != nullptr && to_field_type != nullptr) { | ||||
| const CPPType &from_base_type = from_field_type->field_type(); | const CPPType &from_base_type = from_field_type->base_type(); | ||||
| const CPPType &to_base_type = to_field_type->field_type(); | const CPPType &to_base_type = to_field_type->base_type(); | ||||
| if (conversions_.is_convertible(from_base_type, to_base_type)) { | if (conversions_.is_convertible(from_base_type, to_base_type)) { | ||||
| if (from_field_type->is_field(from_value)) { | |||||
| const GField &from_field = *from_field_type->get_field_ptr(from_value); | |||||
| const MultiFunction &fn = *conversions_.get_conversion_multi_function( | const MultiFunction &fn = *conversions_.get_conversion_multi_function( | ||||
| MFDataType::ForSingle(from_base_type), MFDataType::ForSingle(to_base_type)); | MFDataType::ForSingle(from_base_type), MFDataType::ForSingle(to_base_type)); | ||||
| const GField &from_field = *(const GField *)from_value; | |||||
| auto operation = std::make_shared<fn::FieldOperation>(fn, Vector<GField>{from_field}); | auto operation = std::make_shared<fn::FieldOperation>(fn, Vector<GField>{from_field}); | ||||
| new (to_value) GField(std::move(operation), 0); | to_field_type->construct_from_field(to_value, GField(std::move(operation), 0)); | ||||
| } | |||||
| else { | |||||
| to_field_type->default_construct(to_value); | |||||
| const void *from_value_ptr = from_field_type->get_value_ptr(from_value); | |||||
| void *to_value_ptr = to_field_type->get_value_ptr(to_value); | |||||
| conversions_.get_conversion_functions(from_base_type, to_base_type) | |||||
| ->convert_single_to_initialized(from_value_ptr, to_value_ptr); | |||||
| } | |||||
| return; | return; | ||||
| } | } | ||||
| } | } | ||||
| if (conversions_.is_convertible(from_type, to_type)) { | if (conversions_.is_convertible(from_type, to_type)) { | ||||
| /* Do the conversion if possible. */ | /* Do the conversion if possible. */ | ||||
| conversions_.convert_to_uninitialized(from_type, to_type, from_value, to_value); | conversions_.convert_to_uninitialized(from_type, to_type, from_value, to_value); | ||||
| } | } | ||||
| else { | else { | ||||
| /* Cannot convert, use default value instead. */ | /* Cannot convert, use default value instead. */ | ||||
| this->construct_default_value(to_type, to_value); | this->construct_default_value(to_type, to_value); | ||||
| } | } | ||||
| } | } | ||||
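Note on the change above: the rewritten conversion path hinges on the switch from FieldCPPType to ValueOrFieldCPPType. A socket value is now either a realized value of the base type or a field that still has to be evaluated, so a type conversion needs the two branches seen in convert_value(): wrap the field in a conversion FieldOperation, or convert the stored value directly. The following is a minimal, self-contained sketch of that idea; ValueOrFieldSketch and convert_sketch are hypothetical names used only for illustration, not Blender's fn::ValueOrField implementation.

```cpp
#include <functional>
#include <optional>

/* A value that is either realized (value) or deferred (field). The "field" is
 * modelled as a plain callable here, purely to keep the sketch self-contained. */
template<typename T> struct ValueOrFieldSketch {
  T value{};                               /* Valid when no field is assigned. */
  std::optional<std::function<T()>> field; /* Stand-in for a lazily evaluated field. */

  bool is_field() const
  {
    return field.has_value();
  }
};

/* Convert From -> To, mirroring the two branches in convert_value() above. */
template<typename From, typename To>
ValueOrFieldSketch<To> convert_sketch(const ValueOrFieldSketch<From> &src,
                                      std::function<To(const From &)> convert_single)
{
  ValueOrFieldSketch<To> dst;
  if (src.is_field()) {
    /* Deferred: build a new "field" that applies the conversion when evaluated. */
    const std::function<From()> src_field = *src.field;
    dst.field = [src_field, convert_single]() { return convert_single(src_field()); };
  }
  else {
    /* Immediate: convert the realized value right away. */
    dst.value = convert_single(src.value);
  }
  return dst;
}
```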
| void construct_default_value(const CPPType &type, void *r_value) | void construct_default_value(const CPPType &type, void *r_value) | ||||
| { | { | ||||
| if (const FieldCPPType *field_cpp_type = dynamic_cast<const FieldCPPType *>(&type)) { | |||||
| const CPPType &base_type = field_cpp_type->field_type(); | |||||
| auto constant_fn = std::make_unique<fn::CustomMF_GenericConstant>( | |||||
| base_type, base_type.default_value(), false); | |||||
| auto operation = std::make_shared<fn::FieldOperation>(std::move(constant_fn)); | |||||
| new (r_value) GField(std::move(operation), 0); | |||||
| return; | |||||
| } | |||||
| type.copy_construct(type.default_value(), r_value); | type.copy_construct(type.default_value(), r_value); | ||||
| } | } | ||||
| NodeState &get_node_state(const DNode node) | NodeState &get_node_state(const DNode node) | ||||
| { | { | ||||
| return *node_states_.lookup_key_as(node).state; | return *node_states_.lookup_key_as(node).state; | ||||
| } | } | ||||
| Show All 15 Lines | #endif | ||||
| void log_socket_value(Span<DSocket> sockets, GPointer value) | void log_socket_value(Span<DSocket> sockets, GPointer value) | ||||
| { | { | ||||
| if (params_.geo_logger == nullptr) { | if (params_.geo_logger == nullptr) { | ||||
| return; | return; | ||||
| } | } | ||||
| params_.geo_logger->local().log_value_for_sockets(sockets, value); | params_.geo_logger->local().log_value_for_sockets(sockets, value); | ||||
| } | } | ||||
| void log_debug_message(DNode node, std::string message) | |||||
| { | |||||
| if (params_.geo_logger == nullptr) { | |||||
| return; | |||||
| } | |||||
| params_.geo_logger->local().log_debug_message(node, std::move(message)); | |||||
| } | |||||
| /* In most cases when `NodeState` is accessed, the node has to be locked first to avoid race | /* In most cases when `NodeState` is accessed, the node has to be locked first to avoid race | ||||
| * conditions. */ | * conditions. */ | ||||
| template<typename Function> | template<typename Function> | ||||
| void with_locked_node(const DNode node, NodeState &node_state, const Function &function) | void with_locked_node(const DNode node, | ||||
| NodeState &node_state, | |||||
| NodeTaskRunState *run_state, | |||||
| const Function &function) | |||||
| { | { | ||||
| LockedNode locked_node{node, node_state}; | LockedNode locked_node{node, node_state}; | ||||
| node_state.mutex.lock(); | node_state.mutex.lock(); | ||||
| /* Isolate this thread because we don't want it to start executing another node. This other | /* Isolate this thread because we don't want it to start executing another node. This other | ||||
| * node might want to lock the same mutex, leading to a deadlock. */ | * node might want to lock the same mutex, leading to a deadlock. */ | ||||
| threading::isolate_task([&] { function(locked_node); }); | threading::isolate_task([&] { function(locked_node); }); | ||||
| node_state.mutex.unlock(); | node_state.mutex.unlock(); | ||||
| /* Then send notifications to the other nodes after the node state is unlocked. This avoids | /* Then send notifications to the other nodes after the node state is unlocked. This avoids | ||||
| * locking two nodes at the same time on this thread and helps to prevent deadlocks. */ | * locking two nodes at the same time on this thread and helps to prevent deadlocks. */ | ||||
| for (const DOutputSocket &socket : locked_node.delayed_required_outputs) { | for (const DOutputSocket &socket : locked_node.delayed_required_outputs) { | ||||
| this->send_output_required_notification(socket); | this->send_output_required_notification(socket, run_state); | ||||
| } | } | ||||
| for (const DOutputSocket &socket : locked_node.delayed_unused_outputs) { | for (const DOutputSocket &socket : locked_node.delayed_unused_outputs) { | ||||
| this->send_output_unused_notification(socket); | this->send_output_unused_notification(socket, run_state); | ||||
| } | |||||
| for (const DNode &node_to_schedule : locked_node.delayed_scheduled_nodes) { | |||||
| if (run_state != nullptr && !run_state->next_node_to_run) { | |||||
| /* Execute the node on the same thread after the current node has finished. */ | ||||
| /* Currently, this assumes that it is always best to run the first scheduled node on the | ||||
| * same thread. That is usually correct, because the geometry socket, which carries | ||||
| * the most data, usually comes first in nodes. */ | ||||
| run_state->next_node_to_run = node_to_schedule; | |||||
| } | |||||
| else { | |||||
| /* Push the node to the task pool so that another thread can start working on it. */ | |||||
| this->add_node_to_task_pool(node_to_schedule); | |||||
| } | } | ||||
| for (const DNode &node : locked_node.delayed_scheduled_nodes) { | |||||
| this->add_node_to_task_pool(node); | |||||
| } | } | ||||
| } | } | ||||
| }; | }; | ||||
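Note on the change above: the new delayed_scheduled_nodes handling in with_locked_node() is a small scheduling optimization. The first node that becomes runnable is remembered in run_state->next_node_to_run and executed on the current thread once the current node is done, while every further runnable node is pushed to the task pool for other threads. Below is a generic sketch of that pattern under toy assumptions; Node, task_pool and run_on_this_thread() are placeholders, not Blender's TaskPool API.

```cpp
#include <queue>
#include <vector>

struct Node {
  std::vector<Node *> dependants; /* Nodes that become runnable after this one. */
  void execute() {}               /* Placeholder for the actual node evaluation. */
};

static std::queue<Node *> task_pool; /* Stand-in for work handed to other threads. */

static void run_on_this_thread(Node *first)
{
  Node *next = first;
  while (next != nullptr) {
    Node *current = next;
    next = nullptr;
    current->execute();
    for (Node *runnable : current->dependants) {
      if (next == nullptr) {
        /* Keep the first follow-up node on this thread; this skips a task-pool
         * round trip for the common case of a linear chain of nodes. */
        next = runnable;
      }
      else {
        /* Any additional runnable nodes can be picked up by other threads. */
        task_pool.push(runnable);
      }
    }
  }
}
```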
| NodeParamsProvider::NodeParamsProvider(GeometryNodesEvaluator &evaluator, | NodeParamsProvider::NodeParamsProvider(GeometryNodesEvaluator &evaluator, | ||||
| DNode dnode, | DNode dnode, | ||||
| NodeState &node_state) | NodeState &node_state, | ||||
| : evaluator_(evaluator), node_state_(node_state) | NodeTaskRunState *run_state) | ||||
| : evaluator_(evaluator), node_state_(node_state), run_state_(run_state) | |||||
| { | { | ||||
| this->dnode = dnode; | this->dnode = dnode; | ||||
| this->self_object = evaluator.params_.self_object; | this->self_object = evaluator.params_.self_object; | ||||
| this->modifier = &evaluator.params_.modifier_->modifier; | this->modifier = &evaluator.params_.modifier_->modifier; | ||||
| this->depsgraph = evaluator.params_.depsgraph; | this->depsgraph = evaluator.params_.depsgraph; | ||||
| this->logger = evaluator.params_.geo_logger; | this->logger = evaluator.params_.geo_logger; | ||||
| } | } | ||||
| ▲ Show 20 Lines • Show All 91 Lines • ▼ Show 20 Lines | |||||
| void NodeParamsProvider::set_output(StringRef identifier, GMutablePointer value) | void NodeParamsProvider::set_output(StringRef identifier, GMutablePointer value) | ||||
| { | { | ||||
| const DOutputSocket socket = this->dnode.output_by_identifier(identifier); | const DOutputSocket socket = this->dnode.output_by_identifier(identifier); | ||||
| BLI_assert(socket); | BLI_assert(socket); | ||||
| OutputState &output_state = node_state_.outputs[socket->index()]; | OutputState &output_state = node_state_.outputs[socket->index()]; | ||||
| BLI_assert(!output_state.has_been_computed); | BLI_assert(!output_state.has_been_computed); | ||||
| evaluator_.forward_output(socket, value); | evaluator_.forward_output(socket, value, run_state_); | ||||
| output_state.has_been_computed = true; | output_state.has_been_computed = true; | ||||
| } | } | ||||
| bool NodeParamsProvider::lazy_require_input(StringRef identifier) | bool NodeParamsProvider::lazy_require_input(StringRef identifier) | ||||
| { | { | ||||
| BLI_assert(node_supports_laziness(this->dnode)); | BLI_assert(node_supports_laziness(this->dnode)); | ||||
| const DInputSocket socket = this->dnode.input_by_identifier(identifier); | const DInputSocket socket = this->dnode.input_by_identifier(identifier); | ||||
| BLI_assert(socket); | BLI_assert(socket); | ||||
| InputState &input_state = node_state_.inputs[socket->index()]; | InputState &input_state = node_state_.inputs[socket->index()]; | ||||
| if (input_state.was_ready_for_execution) { | if (input_state.was_ready_for_execution) { | ||||
| return false; | return false; | ||||
| } | } | ||||
| evaluator_.with_locked_node(this->dnode, node_state_, [&](LockedNode &locked_node) { | evaluator_.with_locked_node(this->dnode, node_state_, run_state_, [&](LockedNode &locked_node) { | ||||
| evaluator_.set_input_required(locked_node, socket); | if (!evaluator_.set_input_required(locked_node, socket)) { | ||||
| /* Schedule the currently executed node again because the value is available now but was not | |||||
| * ready for the current execution. */ | |||||
| evaluator_.schedule_node(locked_node); | |||||
| } | |||||
| }); | }); | ||||
| return true; | return true; | ||||
| } | } | ||||
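Note on the change above: lazy_require_input() returns false when the input was already available for the current execution and true when the node still has to wait. The added schedule_node() call covers the case where the value was computed between the start of the execution and this request, so the node is re-run immediately instead of waiting for a notification that will never arrive. A simplified sketch of the calling convention as seen from a node's exec function, assuming the GeoNodeExecParams wrapper forwards to the provider method above (the socket name "Input" is made up, and the snippet is meant to be read within the Blender source tree):

```cpp
#include "NOD_geometry_exec.hh"

/* Hypothetical lazy node exec function, for illustration only. */
static void lazy_node_exec_sketch(blender::nodes::GeoNodeExecParams params)
{
  /* First execution: request the input. A return value of true means the value
   * was not ready yet, so return now; the evaluator re-runs this node once the
   * value exists (right away, if it turned out to be available already). */
  if (params.lazy_require_input("Input")) {
    return;
  }
  /* Later execution: the requested input is guaranteed to be available. */
  /* ... extract the value and set outputs ... */
}
```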
| void NodeParamsProvider::set_input_unused(StringRef identifier) | void NodeParamsProvider::set_input_unused(StringRef identifier) | ||||
| { | { | ||||
| const DInputSocket socket = this->dnode.input_by_identifier(identifier); | const DInputSocket socket = this->dnode.input_by_identifier(identifier); | ||||
| BLI_assert(socket); | BLI_assert(socket); | ||||
| evaluator_.with_locked_node(this->dnode, node_state_, [&](LockedNode &locked_node) { | evaluator_.with_locked_node(this->dnode, node_state_, run_state_, [&](LockedNode &locked_node) { | ||||
| evaluator_.set_input_unused(locked_node, socket); | evaluator_.set_input_unused(locked_node, socket); | ||||
| }); | }); | ||||
| } | } | ||||
| bool NodeParamsProvider::output_is_required(StringRef identifier) const | bool NodeParamsProvider::output_is_required(StringRef identifier) const | ||||
| { | { | ||||
| const DOutputSocket socket = this->dnode.output_by_identifier(identifier); | const DOutputSocket socket = this->dnode.output_by_identifier(identifier); | ||||
| BLI_assert(socket); | BLI_assert(socket); | ||||
| Show All 28 Lines | |||||