source/blender/blenlib/intern/task.c
@@ (first 169 lines skipped) @@ struct TaskPool {
   volatile bool do_cancel;
   volatile bool do_work;
   volatile bool is_suspended;
   bool start_suspended;
   ListBase suspended_queue;
   size_t num_suspended;
+  TaskPriority priority;
   /* If set, this pool may never be work_and_wait'ed, which means TaskScheduler
    * has to use its special background fallback thread in case we are in
    * single-threaded situation.
    */
   bool run_in_background;
   /* This is a task scheduler's ID of a thread at which pool was constructed.
    * It will be used to access task TLS.
@@ (467 lines skipped) @@ static void task_scheduler_clear(TaskScheduler *scheduler, TaskPool *pool)
   task_pool_num_decrease(pool, done);
 }
 
 /* Task Pool */
 
 static TaskPool *task_pool_create_ex(TaskScheduler *scheduler,
                                      void *userdata,
                                      const bool is_background,
-                                     const bool is_suspended)
+                                     const bool is_suspended,
+                                     TaskPriority priority)
 {
   TaskPool *pool = MEM_mallocN(sizeof(TaskPool), "TaskPool");
 #ifndef NDEBUG
   /* Assert we do not try to create a background pool from some parent task -
    * those only work OK from main thread. */
   if (is_background) {
     const pthread_t thread_id = pthread_self();
     int i = scheduler->num_threads;
     while (i--) {
       BLI_assert(!pthread_equal(scheduler->threads[i], thread_id));
     }
   }
 #endif
   pool->scheduler = scheduler;
   pool->num = 0;
   pool->do_cancel = false;
   pool->do_work = false;
   pool->is_suspended = is_suspended;
   pool->start_suspended = is_suspended;
   pool->num_suspended = 0;
   pool->suspended_queue.first = pool->suspended_queue.last = NULL;
+  pool->priority = priority;
   pool->run_in_background = is_background;
   pool->use_local_tls = false;
   BLI_mutex_init(&pool->num_mutex);
   BLI_condition_init(&pool->num_cond);
   pool->userdata = userdata;
   BLI_mutex_init(&pool->user_mutex);
@@ (35 lines skipped) @@ #endif
   BLI_threaded_malloc_begin();
   return pool;
 }
 /**
  * Create a normal task pool. Tasks will be executed as soon as they are added.
  */
-TaskPool *BLI_task_pool_create(TaskScheduler *scheduler, void *userdata)
+TaskPool *BLI_task_pool_create(TaskScheduler *scheduler, void *userdata, TaskPriority priority)
 {
-  return task_pool_create_ex(scheduler, userdata, false, false);
+  return task_pool_create_ex(scheduler, userdata, false, false, priority);
 }
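With priority now a property of the pool rather than of each push, the common calling pattern changes accordingly. A minimal sketch of the updated API, not part of this change: the demo_* names are hypothetical, TASK_PRIORITY_LOW is assumed to exist alongside the TASK_PRIORITY_HIGH used elsewhere in this file, and the callback signature follows the usual TaskRunFunction shape.

/* Sketch only; demo_* names are hypothetical. */
static void demo_run(TaskPool *__restrict pool, void *taskdata, int thread_id)
{
  /* Runs on a worker thread; taskdata is the pointer that was pushed. */
}

static void demo_usage(void *demo_data)
{
  TaskScheduler *scheduler = BLI_task_scheduler_get();
  /* Priority is chosen once, at pool creation time. */
  TaskPool *pool = BLI_task_pool_create(scheduler, NULL, TASK_PRIORITY_LOW);
  /* The push no longer takes a priority; the trailing argument is the
   * optional free callback for taskdata (NULL: nothing special to free). */
  BLI_task_pool_push(pool, demo_run, demo_data, false, NULL);
  BLI_task_pool_work_and_wait(pool);
  BLI_task_pool_free(pool);
}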
 /**
  * Create a background task pool.
  * In multi-threaded context, there is no difference from #BLI_task_pool_create(),
  * but in single-threaded case it is ensured to have at least one worker thread to run on
  * (i.e. you don't have to call #BLI_task_pool_work_and_wait
  * on it to be sure it will be processed).
  *
  * \note Background pools are non-recursive
  * (that is, you should not create other background pools in tasks assigned to a background pool,
  * they could end up never being executed, since the 'fallback' background thread is already
  * busy with the parent task in single-threaded context).
  */
-TaskPool *BLI_task_pool_create_background(TaskScheduler *scheduler, void *userdata)
+TaskPool *BLI_task_pool_create_background(TaskScheduler *scheduler,
+                                          void *userdata,
+                                          TaskPriority priority)
 {
-  return task_pool_create_ex(scheduler, userdata, true, false);
+  return task_pool_create_ex(scheduler, userdata, true, false, priority);
 }
 /**
  * Similar to BLI_task_pool_create() but does not schedule any tasks for execution
  * until BLI_task_pool_work_and_wait() is called. This helps reduce threading
  * overhead when pushing a huge amount of small initial tasks from the main thread.
  */
-TaskPool *BLI_task_pool_create_suspended(TaskScheduler *scheduler, void *userdata)
+TaskPool *BLI_task_pool_create_suspended(TaskScheduler *scheduler,
+                                         void *userdata,
+                                         TaskPriority priority)
 {
-  return task_pool_create_ex(scheduler, userdata, false, true);
+  return task_pool_create_ex(scheduler, userdata, false, true, priority);
 }
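This is the pattern the parallel_* implementations below rely on: pushes to a suspended pool only queue the task, and the worker threads are woken in bulk inside BLI_task_pool_work_and_wait(). A minimal sketch, assuming an existing scheduler pointer; run_chunk and chunk_count are hypothetical.

TaskPool *pool = BLI_task_pool_create_suspended(scheduler, NULL, TASK_PRIORITY_HIGH);
for (int i = 0; i < chunk_count; i++) {
  /* Cheap: the task is queued on the pool; no worker threads are woken yet. */
  BLI_task_pool_push(pool, run_chunk, POINTER_FROM_INT(i), false, NULL);
}
/* Scheduling and execution both happen here. */
BLI_task_pool_work_and_wait(pool);
BLI_task_pool_free(pool);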
 void BLI_task_pool_free(TaskPool *pool)
 {
   BLI_task_pool_cancel(pool);
   BLI_mutex_end(&pool->num_mutex);
   BLI_condition_end(&pool->num_cond);
@@ (26 lines skipped) @@ BLI_INLINE bool task_can_use_local_queues(TaskPool *pool, int thread_id)
   return (thread_id != -1 && (thread_id != pool->thread_id || pool->do_work));
 }
 static void task_pool_push(TaskPool *pool,
                            TaskRunFunction run,
                            void *taskdata,
                            bool free_taskdata,
                            TaskFreeFunction freedata,
-                           TaskPriority priority,
                            int thread_id)
 {
   /* Allocate task and fill its properties. */
   Task *task = task_alloc(pool, thread_id);
   task->run = run;
   task->taskdata = taskdata;
   task->free_taskdata = free_taskdata;
   task->freedata = freedata;
@@ (29 lines skipped) @@ if (tls->do_delayed_push && tls->num_delayed_queue < DELAYED_QUEUE_SIZE) {
       tls->delayed_queue[tls->num_delayed_queue] = task;
       tls->num_delayed_queue++;
       return;
     }
   }
   /* Push to the global execution pool. This is the slowest possible method
    * and incurs a noticeable amount of threading overhead. */
-  task_scheduler_push(pool->scheduler, task, priority);
+  task_scheduler_push(pool->scheduler, task, pool->priority);
 }
-void BLI_task_pool_push_ex(TaskPool *pool,
-                           TaskRunFunction run,
-                           void *taskdata,
-                           bool free_taskdata,
-                           TaskFreeFunction freedata,
-                           TaskPriority priority)
-{
-  task_pool_push(pool, run, taskdata, free_taskdata, freedata, priority, -1);
-}
-
-void BLI_task_pool_push(
-    TaskPool *pool, TaskRunFunction run, void *taskdata, bool free_taskdata, TaskPriority priority)
-{
-  BLI_task_pool_push_ex(pool, run, taskdata, free_taskdata, NULL, priority);
-}
+void BLI_task_pool_push(TaskPool *pool,
+                        TaskRunFunction run,
+                        void *taskdata,
+                        bool free_taskdata,
+                        TaskFreeFunction freedata)
+{
+  task_pool_push(pool, run, taskdata, free_taskdata, freedata, -1);
+}
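With the _ex variant folded in, every caller now passes the free callback explicitly. A hedged sketch of the two cleanup modes: MyData, my_run and my_free are hypothetical, and the assumption that a NULL callback combined with free_taskdata falls back to a plain MEM_freeN() reflects this file's task_data_free() behavior, which is not visible in this excerpt.

/* Sketch only; MyData, my_run and my_free are hypothetical. */
static void my_free(TaskPool *__restrict pool, void *taskdata, int thread_id)
{
  MyData *data = taskdata;
  MEM_freeN(data->payload); /* Release nested allocations first. */
  MEM_freeN(data);
}

/* Custom cleanup through the callback: */
BLI_task_pool_push(pool, my_run, data, true, my_free);

/* free_taskdata == true with a NULL callback: taskdata itself is assumed
 * to be released with plain MEM_freeN(). */
BLI_task_pool_push(pool, my_run, other_data, true, NULL);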
 void BLI_task_pool_push_from_thread(TaskPool *pool,
                                     TaskRunFunction run,
                                     void *taskdata,
                                     bool free_taskdata,
-                                    TaskPriority priority,
+                                    TaskFreeFunction freedata,
                                     int thread_id)
 {
-  task_pool_push(pool, run, taskdata, free_taskdata, NULL, priority, thread_id);
+  task_pool_push(pool, run, taskdata, free_taskdata, freedata, thread_id);
 }
 void BLI_task_pool_work_and_wait(TaskPool *pool)
 {
   TaskThreadLocalStorage *tls = get_task_tls(pool, pool->thread_id);
   TaskScheduler *scheduler = pool->scheduler;
   if (atomic_fetch_and_and_uint8((uint8_t *)&pool->is_suspended, 0)) {
@@ (436 lines skipped) @@ void BLI_task_parallel_range(const int start,
   range_pool.num_tasks = num_tasks = min_ii(num_tasks,
                                             max_ii(1, (stop - start) / range_pool.chunk_size));
   if (num_tasks == 1) {
     parallel_range_single_thread(&range_pool);
     return;
   }
-  TaskPool *task_pool = range_pool.pool = BLI_task_pool_create_suspended(task_scheduler,
-                                                                         &range_pool);
+  TaskPool *task_pool = range_pool.pool = BLI_task_pool_create_suspended(
+      task_scheduler, &range_pool, TASK_PRIORITY_HIGH);
   range_pool.current_state = &state;
   if (use_tls_data) {
     state.flatten_tls_storage = flatten_tls_storage = MALLOCA(tls_data_size * (size_t)num_tasks);
     state.tls_data_size = tls_data_size;
   }
   for (i = 0; i < num_tasks; i++) {
     if (use_tls_data) {
       void *userdata_chunk_local = (char *)flatten_tls_storage + (tls_data_size * (size_t)i);
       memcpy(userdata_chunk_local, tls_data, tls_data_size);
     }
     /* Use this pool's pre-allocated tasks. */
-    BLI_task_pool_push_from_thread(task_pool,
-                                   parallel_range_func,
-                                   POINTER_FROM_INT(i),
-                                   false,
-                                   TASK_PRIORITY_HIGH,
-                                   task_pool->thread_id);
+    BLI_task_pool_push_from_thread(
+        task_pool, parallel_range_func, POINTER_FROM_INT(i), false, NULL, task_pool->thread_id);
   }
   BLI_task_pool_work_and_wait(task_pool);
   BLI_task_pool_free(task_pool);
   if (use_tls_data) {
     if (settings->func_finalize != NULL) {
       for (i = 0; i < num_tasks; i++) {
@@ (128 lines skipped) @@ state->flatten_tls_storage = userdata_chunk_array = MALLOCA(userdata_chunk_size *
                                                               (size_t)num_tasks);
     for (int i = 0; i < num_tasks; i++) {
       void *userdata_chunk_local = (char *)userdata_chunk_array +
                                    (userdata_chunk_size * (size_t)i);
       memcpy(userdata_chunk_local, userdata_chunk, userdata_chunk_size);
     }
   }
-  TaskPool *task_pool = range_pool->pool = BLI_task_pool_create_suspended(task_scheduler,
-                                                                          range_pool);
+  TaskPool *task_pool = range_pool->pool = BLI_task_pool_create_suspended(
+      task_scheduler, range_pool, TASK_PRIORITY_HIGH);
   range_pool->current_state = range_pool->parallel_range_states;
   for (int i = 0; i < num_tasks; i++) {
-    BLI_task_pool_push_from_thread(task_pool,
-                                   parallel_range_func,
-                                   POINTER_FROM_INT(i),
-                                   false,
-                                   TASK_PRIORITY_HIGH,
-                                   task_pool->thread_id);
+    BLI_task_pool_push_from_thread(
+        task_pool, parallel_range_func, POINTER_FROM_INT(i), false, NULL, task_pool->thread_id);
   }
   BLI_task_pool_work_and_wait(task_pool);
   BLI_assert(atomic_cas_ptr((void **)&range_pool->current_state, NULL, NULL) == NULL);
   /* Finalize all tasks. */
   for (TaskParallelRangeState *state = range_pool->parallel_range_states; state != NULL;
        state = state->next) {
     const size_t userdata_chunk_size = state->tls_data_size;
     void *userdata_chunk_array = state->flatten_tls_storage;
     UNUSED_VARS_NDEBUG(userdata_chunk_array);
     if (userdata_chunk_size == 0) {
       BLI_assert(userdata_chunk_array == NULL);
       continue;
     }
     if (state->func_finalize != NULL) {
-      BLI_task_pool_push_from_thread(task_pool,
-                                     parallel_range_func_finalize,
-                                     state,
-                                     false,
-                                     TASK_PRIORITY_HIGH,
-                                     task_pool->thread_id);
+      BLI_task_pool_push_from_thread(
+          task_pool, parallel_range_func_finalize, state, false, NULL, task_pool->thread_id);
     }
   }
   BLI_task_pool_work_and_wait(task_pool);
   BLI_task_pool_free(task_pool);
   range_pool->pool = NULL;
   /* Cleanup all tasks. */
@@ (170 lines skipped) @@ static void task_parallel_iterator_do(const TaskParallelSettings *settings,
   state->iter_shared.spin_lock = &spin_lock;
   void *userdata_chunk = settings->userdata_chunk;
   const size_t userdata_chunk_size = settings->userdata_chunk_size;
   void *userdata_chunk_local = NULL;
   void *userdata_chunk_array = NULL;
   const bool use_userdata_chunk = (userdata_chunk_size != 0) && (userdata_chunk != NULL);
-  TaskPool *task_pool = BLI_task_pool_create_suspended(task_scheduler, state);
+  TaskPool *task_pool = BLI_task_pool_create_suspended(task_scheduler, state, TASK_PRIORITY_HIGH);
   if (use_userdata_chunk) {
     userdata_chunk_array = MALLOCA(userdata_chunk_size * num_tasks);
   }
   for (size_t i = 0; i < num_tasks; i++) {
     if (use_userdata_chunk) {
       userdata_chunk_local = (char *)userdata_chunk_array + (userdata_chunk_size * i);
       memcpy(userdata_chunk_local, userdata_chunk, userdata_chunk_size);
     }
     /* Use this pool's pre-allocated tasks. */
     BLI_task_pool_push_from_thread(task_pool,
                                    parallel_iterator_func,
                                    userdata_chunk_local,
                                    false,
-                                   TASK_PRIORITY_HIGH,
+                                   NULL,
                                    task_pool->thread_id);
   }
   BLI_task_pool_work_and_wait(task_pool);
   BLI_task_pool_free(task_pool);
   if (use_userdata_chunk) {
     if (settings->func_finalize != NULL) {
@@ (146 lines skipped) @@ if (!use_threading) {
     for (void *item = BLI_mempool_iterstep(&iter); item != NULL;
          item = BLI_mempool_iterstep(&iter)) {
       func(userdata, item);
     }
     return;
   }
   task_scheduler = BLI_task_scheduler_get();
-  task_pool = BLI_task_pool_create_suspended(task_scheduler, &state);
+  task_pool = BLI_task_pool_create_suspended(task_scheduler, &state, TASK_PRIORITY_HIGH);
   num_threads = BLI_task_scheduler_num_threads(task_scheduler);
   /* The idea here is to avoid creating a task for each loop iteration and
    * instead have tasks that are evenly distributed across CPU cores and
    * pull the next item to process via the thread-safe BLI_mempool_iter. */
   num_tasks = num_threads + 2;
   state.userdata = userdata;
   state.func = func;
   BLI_mempool_iter *mempool_iterators = BLI_mempool_iter_threadsafe_create(mempool,
                                                                            (size_t)num_tasks);
   for (i = 0; i < num_tasks; i++) {
     /* Use this pool's pre-allocated tasks. */
     BLI_task_pool_push_from_thread(task_pool,
                                    parallel_mempool_func,
                                    &mempool_iterators[i],
                                    false,
-                                   TASK_PRIORITY_HIGH,
+                                   NULL,
                                    task_pool->thread_id);
   }
   BLI_task_pool_work_and_wait(task_pool);
   BLI_task_pool_free(task_pool);
   BLI_mempool_iter_threadsafe_free(mempool_iterators);
 }
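As the comment above explains, the helper creates a small, fixed number of tasks (worker count plus two) that all pull elements from a shared thread-safe mempool iterator, instead of spawning one task per element. A hedged usage sketch, assuming the enclosing function is the public BLI_task_parallel_mempool(mempool, userdata, func, use_threading) entry point suggested by the parameter names above; process_item is hypothetical.

/* Sketch only; process_item is hypothetical. */
static void process_item(void *userdata, void *item)
{
  /* May run concurrently on several worker threads; each call receives one
   * mempool element handed out by the shared iterator. */
}

void demo_mempool(BLI_mempool *mempool)
{
  /* use_threading = true; small workloads still take the serial path above. */
  BLI_task_parallel_mempool(mempool, NULL, process_item, true);
}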