libuv-svace-build
470 lines · 10.8 KB
1/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2*
3* Permission is hereby granted, free of charge, to any person obtaining a copy
4* of this software and associated documentation files (the "Software"), to
5* deal in the Software without restriction, including without limitation the
6* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7* sell copies of the Software, and to permit persons to whom the Software is
8* furnished to do so, subject to the following conditions:
9*
10* The above copyright notice and this permission notice shall be included in
11* all copies or substantial portions of the Software.
12*
13* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
19* IN THE SOFTWARE.
20*/
21
22#include <assert.h>23#include <limits.h>24#include <stdlib.h>25
26#if defined(__MINGW64_VERSION_MAJOR)27/* MemoryBarrier expands to __mm_mfence in some cases (x86+sse2), which may
28* require this header in some versions of mingw64. */
29#include <intrin.h>30#endif31
32#include "uv.h"33#include "internal.h"34
/* Signature of the callback passed to uv_once(). */
typedef void (*uv__once_cb)(void);

/* Carries the user callback through InitOnceExecuteOnce's void* parameter. */
typedef struct {
  uv__once_cb callback;
} uv__once_data_t;
41static BOOL WINAPI uv__once_inner(INIT_ONCE *once, void* param, void** context) {42uv__once_data_t* data = param;43
44data->callback();45
46return TRUE;47}
48
49void uv_once(uv_once_t* guard, uv__once_cb callback) {50uv__once_data_t data = { .callback = callback };51InitOnceExecuteOnce(&guard->init_once, uv__once_inner, (void*) &data, NULL);52}
53
54
/* Verify that uv_thread_t can be stored in a TLS slot. */
STATIC_ASSERT(sizeof(uv_thread_t) <= sizeof(void*));

/* TLS slot holding the calling thread's own uv_thread_t handle; created
 * lazily through the one-time guard below. */
static uv_key_t uv__current_thread_key;
static uv_once_t uv__current_thread_init_guard = UV_ONCE_INIT;
61
62static void uv__init_current_thread_key(void) {63if (uv_key_create(&uv__current_thread_key))64abort();65}
66
67
68struct thread_ctx {69void (*entry)(void* arg);70void* arg;71uv_thread_t self;72};73
74
75static UINT __stdcall uv__thread_start(void* arg) {76struct thread_ctx *ctx_p;77struct thread_ctx ctx;78
79ctx_p = arg;80ctx = *ctx_p;81uv__free(ctx_p);82
83uv_once(&uv__current_thread_init_guard, uv__init_current_thread_key);84uv_key_set(&uv__current_thread_key, ctx.self);85
86ctx.entry(ctx.arg);87
88return 0;89}
90
91
92int uv_thread_create(uv_thread_t *tid, void (*entry)(void *arg), void *arg) {93uv_thread_options_t params;94params.flags = UV_THREAD_NO_FLAGS;95return uv_thread_create_ex(tid, ¶ms, entry, arg);96}
97
/* Create a thread running entry(arg), honoring an optional stack size from
 * `params`. On success stores the new thread's handle in *tid and returns 0;
 * otherwise returns a UV_* error code and frees all resources. */
int uv_thread_create_ex(uv_thread_t* tid,
                        const uv_thread_options_t* params,
                        void (*entry)(void *arg),
                        void *arg) {
  struct thread_ctx* ctx;
  int err;
  HANDLE thread;
  SYSTEM_INFO sysinfo;
  size_t stack_size;
  size_t pagesize;

  stack_size =
      params->flags & UV_THREAD_HAS_STACK_SIZE ? params->stack_size : 0;

  if (stack_size != 0) {
    GetNativeSystemInfo(&sysinfo);
    pagesize = (size_t)sysinfo.dwPageSize;
    /* Round up to the nearest page boundary. */
    stack_size = (stack_size + pagesize - 1) &~ (pagesize - 1);

    /* _beginthreadex takes the stack size as `unsigned`; reject sizes that
     * would be truncated by the cast below. */
    if ((unsigned)stack_size != stack_size)
      return UV_EINVAL;
  }

  ctx = uv__malloc(sizeof(*ctx));
  if (ctx == NULL)
    return UV_ENOMEM;

  ctx->entry = entry;
  ctx->arg = arg;

  /* Create the thread in suspended state so we have a chance to pass
   * its own creation handle to it */
  thread = (HANDLE) _beginthreadex(NULL,
                                   (unsigned)stack_size,
                                   uv__thread_start,
                                   ctx,
                                   CREATE_SUSPENDED,
                                   NULL);
  if (thread == NULL) {
    /* _beginthreadex reports failure through errno, not GetLastError(). */
    err = errno;
    uv__free(ctx);
  } else {
    err = 0;
    *tid = thread;
    /* Must be written before ResumeThread: uv__thread_start reads it. */
    ctx->self = thread;
    ResumeThread(thread);
  }

  /* Map the CRT errno values _beginthreadex can produce to UV_* codes. */
  switch (err) {
    case 0:
      return 0;
    case EACCES:
      return UV_EACCES;
    case EAGAIN:
      return UV_EAGAIN;
    case EINVAL:
      return UV_EINVAL;
  }

  return UV_EIO;
}
160
161int uv_thread_setaffinity(uv_thread_t* tid,162char* cpumask,163char* oldmask,164size_t mask_size) {165int i;166HANDLE hproc;167DWORD_PTR procmask;168DWORD_PTR sysmask;169DWORD_PTR threadmask;170DWORD_PTR oldthreadmask;171int cpumasksize;172
173cpumasksize = uv_cpumask_size();174assert(cpumasksize > 0);175if (mask_size < (size_t)cpumasksize)176return UV_EINVAL;177
178hproc = GetCurrentProcess();179if (!GetProcessAffinityMask(hproc, &procmask, &sysmask))180return uv_translate_sys_error(GetLastError());181
182threadmask = 0;183for (i = 0; i < cpumasksize; i++) {184if (cpumask[i]) {185if (procmask & (1 << i))186threadmask |= 1 << i;187else188return UV_EINVAL;189}190}191
192oldthreadmask = SetThreadAffinityMask(*tid, threadmask);193if (oldthreadmask == 0)194return uv_translate_sys_error(GetLastError());195
196if (oldmask != NULL) {197for (i = 0; i < cpumasksize; i++)198oldmask[i] = (oldthreadmask >> i) & 1;199}200
201return 0;202}
203
/* Read the thread's CPU affinity into `cpumask` (one byte per CPU,
 * 1 = allowed). `mask_size` must be at least uv_cpumask_size() bytes.
 * Windows has no direct query API, so the current mask is obtained by
 * temporarily setting the affinity to the full process mask and then
 * restoring the previous value that SetThreadAffinityMask returned. */
int uv_thread_getaffinity(uv_thread_t* tid,
                          char* cpumask,
                          size_t mask_size) {
  int i;
  HANDLE hproc;
  DWORD_PTR procmask;
  DWORD_PTR sysmask;
  DWORD_PTR threadmask;
  int cpumasksize;

  cpumasksize = uv_cpumask_size();
  assert(cpumasksize > 0);
  if (mask_size < (size_t)cpumasksize)
    return UV_EINVAL;

  hproc = GetCurrentProcess();
  if (!GetProcessAffinityMask(hproc, &procmask, &sysmask))
    return uv_translate_sys_error(GetLastError());

  /* First call probes (and returns) the old mask; second call restores it. */
  threadmask = SetThreadAffinityMask(*tid, procmask);
  if (threadmask == 0 || SetThreadAffinityMask(*tid, threadmask) == 0)
    return uv_translate_sys_error(GetLastError());

  for (i = 0; i < cpumasksize; i++)
    cpumask[i] = (threadmask >> i) & 1;

  return 0;
}
232
233int uv_thread_getcpu(void) {234return GetCurrentProcessorNumber();235}
236
/* Return the uv_thread_t handle of the calling thread. Threads started by
 * uv_thread_create() find it in the TLS slot set by uv__thread_start();
 * foreign threads (e.g. the main thread) get a duplicated handle created
 * on first call and cached in the same slot.
 * NOTE(review): the duplicated handle is never closed — presumably it is
 * meant to live for the lifetime of the thread/process; confirm. */
uv_thread_t uv_thread_self(void) {
  uv_thread_t key;
  uv_once(&uv__current_thread_init_guard, uv__init_current_thread_key);
  key = uv_key_get(&uv__current_thread_key);
  if (key == NULL) {
    /* If the thread wasn't started by uv_thread_create (such as the main
     * thread), we assign an id to it now. */
    if (!DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
                         GetCurrentProcess(), &key, 0,
                         FALSE, DUPLICATE_SAME_ACCESS)) {
      uv_fatal_error(GetLastError(), "DuplicateHandle");
    }
    uv_key_set(&uv__current_thread_key, key);
  }
  return key;
}
253
254
255int uv_thread_join(uv_thread_t *tid) {256if (WaitForSingleObject(*tid, INFINITE))257return uv_translate_sys_error(GetLastError());258else {259CloseHandle(*tid);260*tid = 0;261MemoryBarrier(); /* For feature parity with pthread_join(). */262return 0;263}264}
265
266
267int uv_thread_equal(const uv_thread_t* t1, const uv_thread_t* t2) {268return *t1 == *t2;269}
270
271
272int uv_mutex_init(uv_mutex_t* mutex) {273InitializeCriticalSection(mutex);274return 0;275}
276
277
278int uv_mutex_init_recursive(uv_mutex_t* mutex) {279return uv_mutex_init(mutex);280}
281
282
283void uv_mutex_destroy(uv_mutex_t* mutex) {284DeleteCriticalSection(mutex);285}
286
287
288void uv_mutex_lock(uv_mutex_t* mutex) {289EnterCriticalSection(mutex);290}
291
292
293int uv_mutex_trylock(uv_mutex_t* mutex) {294if (TryEnterCriticalSection(mutex))295return 0;296else297return UV_EBUSY;298}
299
300
301void uv_mutex_unlock(uv_mutex_t* mutex) {302LeaveCriticalSection(mutex);303}
304
/* Ensure that the ABI for this type remains stable in v1.x
 * (sizes differ between 32- and 64-bit builds). */
#ifdef _WIN64
STATIC_ASSERT(sizeof(uv_rwlock_t) == 80);
#else
STATIC_ASSERT(sizeof(uv_rwlock_t) == 48);
#endif
/* Initialize the read-write lock. The memset zeroes the whole struct —
 * presumably to keep any padding/legacy fields (kept for the ABI size
 * asserted above) in a known state; confirm against uv_rwlock_t's layout.
 * Always returns 0. */
int uv_rwlock_init(uv_rwlock_t* rwlock) {
  memset(rwlock, 0, sizeof(*rwlock));
  InitializeSRWLock(&rwlock->read_write_lock_);

  return 0;
}
318
319
void uv_rwlock_destroy(uv_rwlock_t* rwlock) {
  /* SRWLock does not need explicit destruction so long as there are no waiting threads
     See: https://docs.microsoft.com/windows/win32/api/synchapi/nf-synchapi-initializesrwlock#remarks */
}
324
325
326void uv_rwlock_rdlock(uv_rwlock_t* rwlock) {327AcquireSRWLockShared(&rwlock->read_write_lock_);328}
329
330
331int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock) {332if (!TryAcquireSRWLockShared(&rwlock->read_write_lock_))333return UV_EBUSY;334
335return 0;336}
337
338
339void uv_rwlock_rdunlock(uv_rwlock_t* rwlock) {340ReleaseSRWLockShared(&rwlock->read_write_lock_);341}
342
343
344void uv_rwlock_wrlock(uv_rwlock_t* rwlock) {345AcquireSRWLockExclusive(&rwlock->read_write_lock_);346}
347
348
349int uv_rwlock_trywrlock(uv_rwlock_t* rwlock) {350if (!TryAcquireSRWLockExclusive(&rwlock->read_write_lock_))351return UV_EBUSY;352
353return 0;354}
355
356
357void uv_rwlock_wrunlock(uv_rwlock_t* rwlock) {358ReleaseSRWLockExclusive(&rwlock->read_write_lock_);359}
360
361
362int uv_sem_init(uv_sem_t* sem, unsigned int value) {363*sem = CreateSemaphore(NULL, value, INT_MAX, NULL);364if (*sem == NULL)365return uv_translate_sys_error(GetLastError());366else367return 0;368}
369
370
371void uv_sem_destroy(uv_sem_t* sem) {372if (!CloseHandle(*sem))373abort();374}
375
376
377void uv_sem_post(uv_sem_t* sem) {378if (!ReleaseSemaphore(*sem, 1, NULL))379abort();380}
381
382
383void uv_sem_wait(uv_sem_t* sem) {384if (WaitForSingleObject(*sem, INFINITE) != WAIT_OBJECT_0)385abort();386}
387
388
389int uv_sem_trywait(uv_sem_t* sem) {390DWORD r = WaitForSingleObject(*sem, 0);391
392if (r == WAIT_OBJECT_0)393return 0;394
395if (r == WAIT_TIMEOUT)396return UV_EAGAIN;397
398abort();399return -1; /* Satisfy the compiler. */400}
401
402
403int uv_cond_init(uv_cond_t* cond) {404InitializeConditionVariable(&cond->cond_var);405return 0;406}
407
408
409void uv_cond_destroy(uv_cond_t* cond) {410/* nothing to do */411(void) &cond;412}
413
414
415void uv_cond_signal(uv_cond_t* cond) {416WakeConditionVariable(&cond->cond_var);417}
418
419
420void uv_cond_broadcast(uv_cond_t* cond) {421WakeAllConditionVariable(&cond->cond_var);422}
423
424
425void uv_cond_wait(uv_cond_t* cond, uv_mutex_t* mutex) {426if (!SleepConditionVariableCS(&cond->cond_var, mutex, INFINITE))427abort();428}
429
430
431int uv_cond_timedwait(uv_cond_t* cond, uv_mutex_t* mutex, uint64_t timeout) {432if (SleepConditionVariableCS(&cond->cond_var, mutex, (DWORD)(timeout / 1e6)))433return 0;434if (GetLastError() != ERROR_TIMEOUT)435abort();436return UV_ETIMEDOUT;437}
438
439
440int uv_key_create(uv_key_t* key) {441key->tls_index = TlsAlloc();442if (key->tls_index == TLS_OUT_OF_INDEXES)443return UV_ENOMEM;444return 0;445}
446
447
448void uv_key_delete(uv_key_t* key) {449if (TlsFree(key->tls_index) == FALSE)450abort();451key->tls_index = TLS_OUT_OF_INDEXES;452}
453
454
/* Fetch the calling thread's value for `key`. TlsGetValue returns NULL
 * both for a genuinely stored NULL and on error, so GetLastError()
 * disambiguates: anything other than ERROR_SUCCESS means the call itself
 * failed, which is fatal. */
void* uv_key_get(uv_key_t* key) {
  void* value;

  value = TlsGetValue(key->tls_index);
  if (value == NULL)
    if (GetLastError() != ERROR_SUCCESS)
      abort();

  return value;
}
465
466
467void uv_key_set(uv_key_t* key, void* value) {468if (TlsSetValue(key->tls_index, value) == FALSE)469abort();470}
471