Changeset View
Changeset View
Standalone View
Standalone View
source/blender/blenlib/intern/threads.cc
- This file was moved from source/blender/blenlib/intern/threads.c.
| Show First 20 Lines • Show All 41 Lines • ▼ Show 20 Lines | |||||
| #elif defined(__APPLE__) | #elif defined(__APPLE__) | ||||
| # include <sys/sysctl.h> | # include <sys/sysctl.h> | ||||
| # include <sys/types.h> | # include <sys/types.h> | ||||
| #else | #else | ||||
| # include <sys/time.h> | # include <sys/time.h> | ||||
| # include <unistd.h> | # include <unistd.h> | ||||
| #endif | #endif | ||||
| #ifdef WITH_TBB | |||||
| # include <tbb/spin_mutex.h> | |||||
| #endif | |||||
| #include "atomic_ops.h" | #include "atomic_ops.h" | ||||
| #include "numaapi.h" | #include "numaapi.h" | ||||
| #if defined(__APPLE__) && defined(_OPENMP) && (__GNUC__ == 4) && (__GNUC_MINOR__ == 2) && \ | #if defined(__APPLE__) && defined(_OPENMP) && (__GNUC__ == 4) && (__GNUC_MINOR__ == 2) && \ | ||||
| !defined(__clang__) | !defined(__clang__) | ||||
| # define USE_APPLE_OMP_FIX | # define USE_APPLE_OMP_FIX | ||||
| #endif | #endif | ||||
| ▲ Show 20 Lines • Show All 88 Lines • ▼ Show 20 Lines | |||||
| /* tot = 0 only initializes malloc mutex in a safe way (see sequence.c) | /* tot = 0 only initializes malloc mutex in a safe way (see sequence.c) | ||||
| * problem otherwise: scene render will kill of the mutex! | * problem otherwise: scene render will kill of the mutex! | ||||
| */ | */ | ||||
| void BLI_threadpool_init(ListBase *threadbase, void *(*do_thread)(void *), int tot) | void BLI_threadpool_init(ListBase *threadbase, void *(*do_thread)(void *), int tot) | ||||
| { | { | ||||
| int a; | int a; | ||||
| if (threadbase != NULL && tot > 0) { | if (threadbase != nullptr && tot > 0) { | ||||
| BLI_listbase_clear(threadbase); | BLI_listbase_clear(threadbase); | ||||
| if (tot > RE_MAX_THREAD) { | if (tot > RE_MAX_THREAD) { | ||||
| tot = RE_MAX_THREAD; | tot = RE_MAX_THREAD; | ||||
| } | } | ||||
| else if (tot < 1) { | else if (tot < 1) { | ||||
| tot = 1; | tot = 1; | ||||
| } | } | ||||
| for (a = 0; a < tot; a++) { | for (a = 0; a < tot; a++) { | ||||
| ThreadSlot *tslot = MEM_callocN(sizeof(ThreadSlot), "threadslot"); | ThreadSlot *tslot = static_cast<ThreadSlot *>(MEM_callocN(sizeof(ThreadSlot), "threadslot")); | ||||
| BLI_addtail(threadbase, tslot); | BLI_addtail(threadbase, tslot); | ||||
| tslot->do_thread = do_thread; | tslot->do_thread = do_thread; | ||||
| tslot->avail = 1; | tslot->avail = 1; | ||||
| } | } | ||||
| } | } | ||||
| unsigned int level = atomic_fetch_and_add_u(&thread_levels, 1); | unsigned int level = atomic_fetch_and_add_u(&thread_levels, 1); | ||||
| if (level == 0) { | if (level == 0) { | ||||
| #ifdef USE_APPLE_OMP_FIX | #ifdef USE_APPLE_OMP_FIX | ||||
| /* workaround for Apple gcc 4.2.1 omp vs background thread bug, | /* workaround for Apple gcc 4.2.1 omp vs background thread bug, | ||||
| * we copy gomp thread local storage pointer to setting it again | * we copy gomp thread local storage pointer to setting it again | ||||
| * inside the thread that we start */ | * inside the thread that we start */ | ||||
| thread_tls_data = pthread_getspecific(gomp_tls_key); | thread_tls_data = pthread_getspecific(gomp_tls_key); | ||||
| #endif | #endif | ||||
| } | } | ||||
| } | } | ||||
| /* amount of available threads */ | /* amount of available threads */ | ||||
| int BLI_available_threads(ListBase *threadbase) | int BLI_available_threads(ListBase *threadbase) | ||||
| { | { | ||||
| ThreadSlot *tslot; | |||||
| int counter = 0; | int counter = 0; | ||||
| for (tslot = threadbase->first; tslot; tslot = tslot->next) { | LISTBASE_FOREACH (ThreadSlot *, tslot, threadbase) { | ||||
| if (tslot->avail) { | if (tslot->avail) { | ||||
| counter++; | counter++; | ||||
| } | } | ||||
| } | } | ||||
| return counter; | return counter; | ||||
| } | } | ||||
| /* returns thread number, for sample patterns or threadsafe tables */ | /* returns thread number, for sample patterns or threadsafe tables */ | ||||
| int BLI_threadpool_available_thread_index(ListBase *threadbase) | int BLI_threadpool_available_thread_index(ListBase *threadbase) | ||||
| { | { | ||||
| ThreadSlot *tslot; | |||||
| int counter = 0; | int counter = 0; | ||||
| for (tslot = threadbase->first; tslot; tslot = tslot->next, counter++) { | LISTBASE_FOREACH (ThreadSlot *, tslot, threadbase) { | ||||
| if (tslot->avail) { | if (tslot->avail) { | ||||
| return counter; | return counter; | ||||
| } | } | ||||
| ++counter; | |||||
| } | } | ||||
| return 0; | return 0; | ||||
| } | } | ||||
| static void *tslot_thread_start(void *tslot_p) | static void *tslot_thread_start(void *tslot_p) | ||||
| { | { | ||||
| ThreadSlot *tslot = (ThreadSlot *)tslot_p; | ThreadSlot *tslot = (ThreadSlot *)tslot_p; | ||||
| #ifdef USE_APPLE_OMP_FIX | #ifdef USE_APPLE_OMP_FIX | ||||
| /* workaround for Apple gcc 4.2.1 omp vs background thread bug, | /* workaround for Apple gcc 4.2.1 omp vs background thread bug, | ||||
| * set gomp thread local storage pointer which was copied beforehand */ | * set gomp thread local storage pointer which was copied beforehand */ | ||||
| pthread_setspecific(gomp_tls_key, thread_tls_data); | pthread_setspecific(gomp_tls_key, thread_tls_data); | ||||
| #endif | #endif | ||||
| return tslot->do_thread(tslot->callerdata); | return tslot->do_thread(tslot->callerdata); | ||||
| } | } | ||||
| int BLI_thread_is_main(void) | int BLI_thread_is_main(void) | ||||
| { | { | ||||
| return pthread_equal(pthread_self(), mainid); | return pthread_equal(pthread_self(), mainid); | ||||
| } | } | ||||
| void BLI_threadpool_insert(ListBase *threadbase, void *callerdata) | void BLI_threadpool_insert(ListBase *threadbase, void *callerdata) | ||||
| { | { | ||||
| ThreadSlot *tslot; | LISTBASE_FOREACH (ThreadSlot *, tslot, threadbase) { | ||||
| for (tslot = threadbase->first; tslot; tslot = tslot->next) { | |||||
| if (tslot->avail) { | if (tslot->avail) { | ||||
| tslot->avail = 0; | tslot->avail = 0; | ||||
| tslot->callerdata = callerdata; | tslot->callerdata = callerdata; | ||||
| pthread_create(&tslot->pthread, NULL, tslot_thread_start, tslot); | pthread_create(&tslot->pthread, nullptr, tslot_thread_start, tslot); | ||||
| return; | return; | ||||
| } | } | ||||
| } | } | ||||
| printf("ERROR: could not insert thread slot\n"); | printf("ERROR: could not insert thread slot\n"); | ||||
| } | } | ||||
| void BLI_threadpool_remove(ListBase *threadbase, void *callerdata) | void BLI_threadpool_remove(ListBase *threadbase, void *callerdata) | ||||
| { | { | ||||
| ThreadSlot *tslot; | LISTBASE_FOREACH (ThreadSlot *, tslot, threadbase) { | ||||
| for (tslot = threadbase->first; tslot; tslot = tslot->next) { | |||||
| if (tslot->callerdata == callerdata) { | if (tslot->callerdata == callerdata) { | ||||
| pthread_join(tslot->pthread, NULL); | pthread_join(tslot->pthread, nullptr); | ||||
| tslot->callerdata = NULL; | tslot->callerdata = nullptr; | ||||
| tslot->avail = 1; | tslot->avail = 1; | ||||
| } | } | ||||
| } | } | ||||
| } | } | ||||
| void BLI_threadpool_remove_index(ListBase *threadbase, int index) | void BLI_threadpool_remove_index(ListBase *threadbase, int index) | ||||
| { | { | ||||
| ThreadSlot *tslot; | |||||
| int counter = 0; | int counter = 0; | ||||
| for (tslot = threadbase->first; tslot; tslot = tslot->next, counter++) { | LISTBASE_FOREACH (ThreadSlot *, tslot, threadbase) { | ||||
| if (counter == index && tslot->avail == 0) { | if (counter == index && tslot->avail == 0) { | ||||
| pthread_join(tslot->pthread, NULL); | pthread_join(tslot->pthread, nullptr); | ||||
| tslot->callerdata = NULL; | tslot->callerdata = nullptr; | ||||
| tslot->avail = 1; | tslot->avail = 1; | ||||
| break; | break; | ||||
| } | } | ||||
| ++counter; | |||||
| } | } | ||||
| } | } | ||||
| void BLI_threadpool_clear(ListBase *threadbase) | void BLI_threadpool_clear(ListBase *threadbase) | ||||
| { | { | ||||
| ThreadSlot *tslot; | LISTBASE_FOREACH (ThreadSlot *, tslot, threadbase) { | ||||
| for (tslot = threadbase->first; tslot; tslot = tslot->next) { | |||||
| if (tslot->avail == 0) { | if (tslot->avail == 0) { | ||||
| pthread_join(tslot->pthread, NULL); | pthread_join(tslot->pthread, nullptr); | ||||
| tslot->callerdata = NULL; | tslot->callerdata = nullptr; | ||||
| tslot->avail = 1; | tslot->avail = 1; | ||||
| } | } | ||||
| } | } | ||||
| } | } | ||||
| void BLI_threadpool_end(ListBase *threadbase) | void BLI_threadpool_end(ListBase *threadbase) | ||||
| { | { | ||||
| ThreadSlot *tslot; | |||||
| /* only needed if there's actually some stuff to end | /* only needed if there's actually some stuff to end | ||||
| * this way we don't end up decrementing thread_levels on an empty threadbase | * this way we don't end up decrementing thread_levels on an empty threadbase | ||||
| * */ | * */ | ||||
| if (threadbase && (BLI_listbase_is_empty(threadbase) == false)) { | if (threadbase == nullptr || BLI_listbase_is_empty(threadbase)) { | ||||
| for (tslot = threadbase->first; tslot; tslot = tslot->next) { | return; | ||||
| } | |||||
| LISTBASE_FOREACH (ThreadSlot *, tslot, threadbase) { | |||||
| if (tslot->avail == 0) { | if (tslot->avail == 0) { | ||||
| pthread_join(tslot->pthread, NULL); | pthread_join(tslot->pthread, nullptr); | ||||
| } | } | ||||
| } | } | ||||
| BLI_freelistN(threadbase); | BLI_freelistN(threadbase); | ||||
| } | } | ||||
| } | |||||
| /* System Information */ | /* System Information */ | ||||
| /* how many threads are native on this system? */ | /* how many threads are native on this system? */ | ||||
| int BLI_system_thread_count(void) | int BLI_system_thread_count(void) | ||||
| { | { | ||||
| static int t = -1; | static int t = -1; | ||||
| Show All 12 Lines | |||||
| #else | #else | ||||
| # ifdef __APPLE__ | # ifdef __APPLE__ | ||||
| int mib[2]; | int mib[2]; | ||||
| size_t len; | size_t len; | ||||
| mib[0] = CTL_HW; | mib[0] = CTL_HW; | ||||
| mib[1] = HW_NCPU; | mib[1] = HW_NCPU; | ||||
| len = sizeof(t); | len = sizeof(t); | ||||
| sysctl(mib, 2, &t, &len, NULL, 0); | sysctl(mib, 2, &t, &len, nullptr, 0); | ||||
| # else | # else | ||||
| t = (int)sysconf(_SC_NPROCESSORS_ONLN); | t = (int)sysconf(_SC_NPROCESSORS_ONLN); | ||||
| # endif | # endif | ||||
| #endif | #endif | ||||
| } | } | ||||
| CLAMP(t, 1, RE_MAX_THREAD); | CLAMP(t, 1, RE_MAX_THREAD); | ||||
| Show All 34 Lines | switch (type) { | ||||
| case LOCK_COLORMANAGE: | case LOCK_COLORMANAGE: | ||||
| return &_colormanage_lock; | return &_colormanage_lock; | ||||
| case LOCK_FFTW: | case LOCK_FFTW: | ||||
| return &_fftw_lock; | return &_fftw_lock; | ||||
| case LOCK_VIEW3D: | case LOCK_VIEW3D: | ||||
| return &_view3d_lock; | return &_view3d_lock; | ||||
| default: | default: | ||||
| BLI_assert(0); | BLI_assert(0); | ||||
| return NULL; | return nullptr; | ||||
| } | } | ||||
| } | } | ||||
| void BLI_thread_lock(int type) | void BLI_thread_lock(int type) | ||||
| { | { | ||||
| pthread_mutex_lock(global_mutex_from_type(type)); | pthread_mutex_lock(global_mutex_from_type(type)); | ||||
| } | } | ||||
| void BLI_thread_unlock(int type) | void BLI_thread_unlock(int type) | ||||
| { | { | ||||
| pthread_mutex_unlock(global_mutex_from_type(type)); | pthread_mutex_unlock(global_mutex_from_type(type)); | ||||
| } | } | ||||
| /* Mutex Locks */ | /* Mutex Locks */ | ||||
| void BLI_mutex_init(ThreadMutex *mutex) | void BLI_mutex_init(ThreadMutex *mutex) | ||||
| { | { | ||||
| pthread_mutex_init(mutex, NULL); | pthread_mutex_init(mutex, nullptr); | ||||
| } | } | ||||
| void BLI_mutex_lock(ThreadMutex *mutex) | void BLI_mutex_lock(ThreadMutex *mutex) | ||||
| { | { | ||||
| pthread_mutex_lock(mutex); | pthread_mutex_lock(mutex); | ||||
| } | } | ||||
| void BLI_mutex_unlock(ThreadMutex *mutex) | void BLI_mutex_unlock(ThreadMutex *mutex) | ||||
| { | { | ||||
| pthread_mutex_unlock(mutex); | pthread_mutex_unlock(mutex); | ||||
| } | } | ||||
| bool BLI_mutex_trylock(ThreadMutex *mutex) | bool BLI_mutex_trylock(ThreadMutex *mutex) | ||||
| { | { | ||||
| return (pthread_mutex_trylock(mutex) == 0); | return (pthread_mutex_trylock(mutex) == 0); | ||||
| } | } | ||||
| void BLI_mutex_end(ThreadMutex *mutex) | void BLI_mutex_end(ThreadMutex *mutex) | ||||
| { | { | ||||
| pthread_mutex_destroy(mutex); | pthread_mutex_destroy(mutex); | ||||
| } | } | ||||
| ThreadMutex *BLI_mutex_alloc(void) | ThreadMutex *BLI_mutex_alloc(void) | ||||
| { | { | ||||
| ThreadMutex *mutex = MEM_callocN(sizeof(ThreadMutex), "ThreadMutex"); | ThreadMutex *mutex = static_cast<ThreadMutex *>(MEM_callocN(sizeof(ThreadMutex), "ThreadMutex")); | ||||
| BLI_mutex_init(mutex); | BLI_mutex_init(mutex); | ||||
| return mutex; | return mutex; | ||||
| } | } | ||||
| void BLI_mutex_free(ThreadMutex *mutex) | void BLI_mutex_free(ThreadMutex *mutex) | ||||
| { | { | ||||
| BLI_mutex_end(mutex); | BLI_mutex_end(mutex); | ||||
| MEM_freeN(mutex); | MEM_freeN(mutex); | ||||
| } | } | ||||
/* Spin Locks */

#ifdef WITH_TBB
/* Reinterpret the opaque SpinLock storage as the TBB spin mutex it holds.
 * NOTE: was `#if WITH_TBB`, which warns/fails under -Wundef when the macro
 * is not defined; every other guard in this file uses `#ifdef WITH_TBB`. */
static tbb::spin_mutex *tbb_spin_mutex_cast(SpinLock *spin)
{
  static_assert(sizeof(SpinLock) >= sizeof(tbb::spin_mutex),
                "SpinLock must match tbb::spin_mutex");
  static_assert(alignof(SpinLock) % alignof(tbb::spin_mutex) == 0,
                "SpinLock must be aligned same as tbb::spin_mutex");
  return reinterpret_cast<tbb::spin_mutex *>(spin);
}
#endif
| void BLI_spin_init(SpinLock *spin) | void BLI_spin_init(SpinLock *spin) | ||||
| { | { | ||||
| #if defined(__APPLE__) | #ifdef WITH_TBB | ||||
| *spin = OS_SPINLOCK_INIT; | tbb::spin_mutex *spin_mutex = tbb_spin_mutex_cast(spin); | ||||
| new (spin_mutex) tbb::spin_mutex(); | |||||
| #elif defined(__APPLE__) | |||||
| BLI_mutex_init(spin); | |||||
| #elif defined(_MSC_VER) | #elif defined(_MSC_VER) | ||||
| *spin = 0; | *spin = 0; | ||||
| #else | #else | ||||
| pthread_spin_init(spin, 0); | pthread_spin_init(spin, 0); | ||||
| #endif | #endif | ||||
| } | } | ||||
| void BLI_spin_lock(SpinLock *spin) | void BLI_spin_lock(SpinLock *spin) | ||||
| { | { | ||||
| #if defined(__APPLE__) | #ifdef WITH_TBB | ||||
| OSSpinLockLock(spin); | tbb::spin_mutex *spin_mutex = tbb_spin_mutex_cast(spin); | ||||
| spin_mutex->lock(); | |||||
| #elif defined(__APPLE__) | |||||
| BLI_mutex_lock(spin); | |||||
| #elif defined(_MSC_VER) | #elif defined(_MSC_VER) | ||||
| while (InterlockedExchangeAcquire(spin, 1)) { | while (InterlockedExchangeAcquire(spin, 1)) { | ||||
| while (*spin) { | while (*spin) { | ||||
| /* Spin-lock hint for processors with hyperthreading. */ | /* Spin-lock hint for processors with hyperthreading. */ | ||||
| YieldProcessor(); | YieldProcessor(); | ||||
| } | } | ||||
| } | } | ||||
| #else | #else | ||||
| pthread_spin_lock(spin); | pthread_spin_lock(spin); | ||||
| #endif | #endif | ||||
| } | } | ||||
| void BLI_spin_unlock(SpinLock *spin) | void BLI_spin_unlock(SpinLock *spin) | ||||
| { | { | ||||
| #if defined(__APPLE__) | #ifdef WITH_TBB | ||||
| OSSpinLockUnlock(spin); | tbb::spin_mutex *spin_mutex = tbb_spin_mutex_cast(spin); | ||||
| spin_mutex->unlock(); | |||||
| #elif defined(__APPLE__) | |||||
| BLI_mutex_unlock(spin); | |||||
| #elif defined(_MSC_VER) | #elif defined(_MSC_VER) | ||||
| _ReadWriteBarrier(); | _ReadWriteBarrier(); | ||||
| *spin = 0; | *spin = 0; | ||||
| #else | #else | ||||
| pthread_spin_unlock(spin); | pthread_spin_unlock(spin); | ||||
| #endif | #endif | ||||
| } | } | ||||
| #if defined(__APPLE__) || defined(_MSC_VER) | |||||
| void BLI_spin_end(SpinLock *UNUSED(spin)) | |||||
| { | |||||
| } | |||||
| #else | |||||
| void BLI_spin_end(SpinLock *spin) | void BLI_spin_end(SpinLock *spin) | ||||
| { | { | ||||
| #ifdef WITH_TBB | |||||
| tbb::spin_mutex *spin_mutex = tbb_spin_mutex_cast(spin); | |||||
| spin_mutex->~spin_mutex(); | |||||
| #elif defined(__APPLE__) | |||||
| BLI_mutex_end(spin); | |||||
| #elif defined(_MSC_VER) | |||||
| BLI_mutex_unlock(spin); | |||||
| #else | |||||
| pthread_spin_destroy(spin); | pthread_spin_destroy(spin); | ||||
| } | |||||
| #endif | #endif | ||||
| } | |||||
| /* Read/Write Mutex Lock */ | /* Read/Write Mutex Lock */ | ||||
| void BLI_rw_mutex_init(ThreadRWMutex *mutex) | void BLI_rw_mutex_init(ThreadRWMutex *mutex) | ||||
| { | { | ||||
| pthread_rwlock_init(mutex, NULL); | pthread_rwlock_init(mutex, nullptr); | ||||
| } | } | ||||
| void BLI_rw_mutex_lock(ThreadRWMutex *mutex, int mode) | void BLI_rw_mutex_lock(ThreadRWMutex *mutex, int mode) | ||||
| { | { | ||||
| if (mode == THREAD_LOCK_READ) { | if (mode == THREAD_LOCK_READ) { | ||||
| pthread_rwlock_rdlock(mutex); | pthread_rwlock_rdlock(mutex); | ||||
| } | } | ||||
| else { | else { | ||||
| pthread_rwlock_wrlock(mutex); | pthread_rwlock_wrlock(mutex); | ||||
| } | } | ||||
| } | } | ||||
| void BLI_rw_mutex_unlock(ThreadRWMutex *mutex) | void BLI_rw_mutex_unlock(ThreadRWMutex *mutex) | ||||
| { | { | ||||
| pthread_rwlock_unlock(mutex); | pthread_rwlock_unlock(mutex); | ||||
| } | } | ||||
| void BLI_rw_mutex_end(ThreadRWMutex *mutex) | void BLI_rw_mutex_end(ThreadRWMutex *mutex) | ||||
| { | { | ||||
| pthread_rwlock_destroy(mutex); | pthread_rwlock_destroy(mutex); | ||||
| } | } | ||||
| ThreadRWMutex *BLI_rw_mutex_alloc(void) | ThreadRWMutex *BLI_rw_mutex_alloc(void) | ||||
| { | { | ||||
| ThreadRWMutex *mutex = MEM_callocN(sizeof(ThreadRWMutex), "ThreadRWMutex"); | ThreadRWMutex *mutex = static_cast<ThreadRWMutex *>( | ||||
| MEM_callocN(sizeof(ThreadRWMutex), "ThreadRWMutex")); | |||||
| BLI_rw_mutex_init(mutex); | BLI_rw_mutex_init(mutex); | ||||
| return mutex; | return mutex; | ||||
| } | } | ||||
| void BLI_rw_mutex_free(ThreadRWMutex *mutex) | void BLI_rw_mutex_free(ThreadRWMutex *mutex) | ||||
| { | { | ||||
| BLI_rw_mutex_end(mutex); | BLI_rw_mutex_end(mutex); | ||||
| MEM_freeN(mutex); | MEM_freeN(mutex); | ||||
| } | } | ||||
/* Ticket Mutex Lock */

/* FIFO ("ticket") mutex: waiters take a ticket (queue_tail) and are served
 * in order as queue_head advances, signalled via the condition variable. */
struct TicketMutex {
  pthread_cond_t cond;
  pthread_mutex_t mutex;
  unsigned int queue_head, queue_tail;
};
| TicketMutex *BLI_ticket_mutex_alloc(void) | TicketMutex *BLI_ticket_mutex_alloc(void) | ||||
| { | { | ||||
| TicketMutex *ticket = MEM_callocN(sizeof(TicketMutex), "TicketMutex"); | TicketMutex *ticket = static_cast<TicketMutex *>( | ||||
| MEM_callocN(sizeof(TicketMutex), "TicketMutex")); | |||||
| pthread_cond_init(&ticket->cond, NULL); | pthread_cond_init(&ticket->cond, nullptr); | ||||
| pthread_mutex_init(&ticket->mutex, NULL); | pthread_mutex_init(&ticket->mutex, nullptr); | ||||
| return ticket; | return ticket; | ||||
| } | } | ||||
| void BLI_ticket_mutex_free(TicketMutex *ticket) | void BLI_ticket_mutex_free(TicketMutex *ticket) | ||||
| { | { | ||||
| pthread_mutex_destroy(&ticket->mutex); | pthread_mutex_destroy(&ticket->mutex); | ||||
| pthread_cond_destroy(&ticket->cond); | pthread_cond_destroy(&ticket->cond); | ||||
| Show All 23 Lines | |||||
| } | } | ||||
| /* ************************************************ */ | /* ************************************************ */ | ||||
| /* Condition */ | /* Condition */ | ||||
| void BLI_condition_init(ThreadCondition *cond) | void BLI_condition_init(ThreadCondition *cond) | ||||
| { | { | ||||
| pthread_cond_init(cond, NULL); | pthread_cond_init(cond, nullptr); | ||||
| } | } | ||||
| void BLI_condition_wait(ThreadCondition *cond, ThreadMutex *mutex) | void BLI_condition_wait(ThreadCondition *cond, ThreadMutex *mutex) | ||||
| { | { | ||||
| pthread_cond_wait(cond, mutex); | pthread_cond_wait(cond, mutex); | ||||
| } | } | ||||
| void BLI_condition_wait_global_mutex(ThreadCondition *cond, const int type) | void BLI_condition_wait_global_mutex(ThreadCondition *cond, const int type) | ||||
| Show All 26 Lines | struct ThreadQueue { | ||||
| volatile int nowait; | volatile int nowait; | ||||
| volatile int canceled; | volatile int canceled; | ||||
| }; | }; | ||||
| ThreadQueue *BLI_thread_queue_init(void) | ThreadQueue *BLI_thread_queue_init(void) | ||||
| { | { | ||||
| ThreadQueue *queue; | ThreadQueue *queue; | ||||
| queue = MEM_callocN(sizeof(ThreadQueue), "ThreadQueue"); | queue = static_cast<ThreadQueue *>(MEM_callocN(sizeof(ThreadQueue), "ThreadQueue")); | ||||
| queue->queue = BLI_gsqueue_new(sizeof(void *)); | queue->queue = BLI_gsqueue_new(sizeof(void *)); | ||||
| pthread_mutex_init(&queue->mutex, NULL); | pthread_mutex_init(&queue->mutex, nullptr); | ||||
| pthread_cond_init(&queue->push_cond, NULL); | pthread_cond_init(&queue->push_cond, nullptr); | ||||
| pthread_cond_init(&queue->finish_cond, NULL); | pthread_cond_init(&queue->finish_cond, nullptr); | ||||
| return queue; | return queue; | ||||
| } | } | ||||
| void BLI_thread_queue_free(ThreadQueue *queue) | void BLI_thread_queue_free(ThreadQueue *queue) | ||||
| { | { | ||||
| /* destroy everything, assumes no one is using queue anymore */ | /* destroy everything, assumes no one is using queue anymore */ | ||||
| pthread_cond_destroy(&queue->finish_cond); | pthread_cond_destroy(&queue->finish_cond); | ||||
| Show All 13 Lines | void BLI_thread_queue_push(ThreadQueue *queue, void *work) | ||||
| /* signal threads waiting to pop */ | /* signal threads waiting to pop */ | ||||
| pthread_cond_signal(&queue->push_cond); | pthread_cond_signal(&queue->push_cond); | ||||
| pthread_mutex_unlock(&queue->mutex); | pthread_mutex_unlock(&queue->mutex); | ||||
| } | } | ||||
| void *BLI_thread_queue_pop(ThreadQueue *queue) | void *BLI_thread_queue_pop(ThreadQueue *queue) | ||||
| { | { | ||||
| void *work = NULL; | void *work = nullptr; | ||||
| /* wait until there is work */ | /* wait until there is work */ | ||||
| pthread_mutex_lock(&queue->mutex); | pthread_mutex_lock(&queue->mutex); | ||||
| while (BLI_gsqueue_is_empty(queue->queue) && !queue->nowait) { | while (BLI_gsqueue_is_empty(queue->queue) && !queue->nowait) { | ||||
| pthread_cond_wait(&queue->push_cond, &queue->mutex); | pthread_cond_wait(&queue->push_cond, &queue->mutex); | ||||
| } | } | ||||
| /* if we have something, pop it */ | /* if we have something, pop it */ | ||||
| Show All 20 Lines | #ifdef WIN32 | ||||
| struct _timeb now; | struct _timeb now; | ||||
| _ftime(&now); | _ftime(&now); | ||||
| sec = now.time; | sec = now.time; | ||||
| usec = now.millitm * 1000; /* microsecond precision would be better */ | usec = now.millitm * 1000; /* microsecond precision would be better */ | ||||
| } | } | ||||
| #else | #else | ||||
| { | { | ||||
| struct timeval now; | struct timeval now; | ||||
| gettimeofday(&now, NULL); | gettimeofday(&now, nullptr); | ||||
| sec = now.tv_sec; | sec = now.tv_sec; | ||||
| usec = now.tv_usec; | usec = now.tv_usec; | ||||
| } | } | ||||
| #endif | #endif | ||||
| /* add current time + millisecond offset */ | /* add current time + millisecond offset */ | ||||
| div_result = ldiv(ms, 1000); | div_result = ldiv(ms, 1000); | ||||
| timeout->tv_sec = sec + div_result.quot; | timeout->tv_sec = sec + div_result.quot; | ||||
| x = usec + (div_result.rem * 1000); | x = usec + (div_result.rem * 1000); | ||||
| if (x >= 1000000) { | if (x >= 1000000) { | ||||
| timeout->tv_sec++; | timeout->tv_sec++; | ||||
| x -= 1000000; | x -= 1000000; | ||||
| } | } | ||||
| timeout->tv_nsec = x * 1000; | timeout->tv_nsec = x * 1000; | ||||
| } | } | ||||
| void *BLI_thread_queue_pop_timeout(ThreadQueue *queue, int ms) | void *BLI_thread_queue_pop_timeout(ThreadQueue *queue, int ms) | ||||
| { | { | ||||
| double t; | double t; | ||||
| void *work = NULL; | void *work = nullptr; | ||||
| struct timespec timeout; | struct timespec timeout; | ||||
| t = PIL_check_seconds_timer(); | t = PIL_check_seconds_timer(); | ||||
| wait_timeout(&timeout, ms); | wait_timeout(&timeout, ms); | ||||
| /* wait until there is work */ | /* wait until there is work */ | ||||
| pthread_mutex_lock(&queue->mutex); | pthread_mutex_lock(&queue->mutex); | ||||
| while (BLI_gsqueue_is_empty(queue->queue) && !queue->nowait) { | while (BLI_gsqueue_is_empty(queue->queue) && !queue->nowait) { | ||||
| ▲ Show 20 Lines • Show All 74 Lines • ▼ Show 20 Lines | static bool check_is_threadripper2_alike_topology(void) | ||||
| * encouraged by the system. */ | * encouraged by the system. */ | ||||
| static bool is_initialized = false; | static bool is_initialized = false; | ||||
| static bool is_threadripper2 = false; | static bool is_threadripper2 = false; | ||||
| if (is_initialized) { | if (is_initialized) { | ||||
| return is_threadripper2; | return is_threadripper2; | ||||
| } | } | ||||
| is_initialized = true; | is_initialized = true; | ||||
| char *cpu_brand = BLI_cpu_brand_string(); | char *cpu_brand = BLI_cpu_brand_string(); | ||||
| if (cpu_brand == NULL) { | if (cpu_brand == nullptr) { | ||||
| return false; | return false; | ||||
| } | } | ||||
| if (strstr(cpu_brand, "Threadripper")) { | if (strstr(cpu_brand, "Threadripper")) { | ||||
| /* NOTE: We consider all Thread-rippers having similar topology to | /* NOTE: We consider all Thread-rippers having similar topology to | ||||
| * the second one. This is because we are trying to utilize NUMA node | * the second one. This is because we are trying to utilize NUMA node | ||||
| * 0 as much as possible. This node does exist on earlier versions of | * 0 as much as possible. This node does exist on earlier versions of | ||||
| * thread-ripper and setting affinity to it should not have negative | * thread-ripper and setting affinity to it should not have negative | ||||
| * effect. | * effect. | ||||
| ▲ Show 20 Lines • Show All 85 Lines • Show Last 20 Lines | |||||