/* libuv-svace-build — 296 lines · 7.9 KB */
1/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2*
3* Permission is hereby granted, free of charge, to any person obtaining a copy
4* of this software and associated documentation files (the "Software"), to
5* deal in the Software without restriction, including without limitation the
6* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7* sell copies of the Software, and to permit persons to whom the Software is
8* furnished to do so, subject to the following conditions:
9*
10* The above copyright notice and this permission notice shall be included in
11* all copies or substantial portions of the Software.
12*
13* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
19* IN THE SOFTWARE.
20*/
21
22#include "uv.h"
23#include "task.h"
24
25#include <stdio.h>
26#include <stdlib.h>
27#include <string.h> /* memset */
28
29#ifdef __POSIX__
30#include <pthread.h>
31#endif
32
/* State for one chain of uv_getaddrinfo() requests on a single loop.
 * `handle` is embedded so the callback can recover the whole struct with
 * container_of(). */
struct getaddrinfo_req {
  uv_thread_t thread_id;     /* NOTE(review): never written in the visible
                                code — presumably the thread owning `loop`;
                                confirm before relying on it. */
  unsigned int counter;      /* requests still to issue; decremented in
                                getaddrinfo_cb */
  uv_loop_t* loop;           /* loop the requests run on */
  uv_getaddrinfo_t handle;   /* in-flight request, reused each iteration */
};
39
40
/* State for one chain of uv_fs_stat() requests on a single loop.
 * Mirrors struct getaddrinfo_req; fs_cb recovers it via container_of(). */
struct fs_req {
  uv_thread_t thread_id;  /* NOTE(review): never written in the visible
                             code — presumably the thread owning `loop`;
                             confirm before relying on it. */
  unsigned int counter;   /* requests still to issue; decremented in fs_cb */
  uv_loop_t* loop;        /* loop the requests run on */
  uv_fs_t handle;         /* in-flight request, reused each iteration */
};
47
48
/* Per-worker bookkeeping for threadpool_multiple_event_loops:
 * the thread handle plus a flag the worker sets on clean completion. */
struct test_thread {
  uv_thread_t thread_id;  /* handle used for create/join */
  int thread_called;      /* set to 1 by do_work() when the worker finishes */
};
53
54static void getaddrinfo_do(struct getaddrinfo_req* req);
55static void getaddrinfo_cb(uv_getaddrinfo_t* handle,
56int status,
57struct addrinfo* res);
58static void fs_do(struct fs_req* req);
59static void fs_cb(uv_fs_t* handle);
60
61static int thread_called;
62static uv_key_t tls_key;
63
64
/* Issue (or re-issue) an async getaddrinfo request; the result is delivered
 * to getaddrinfo_cb on the thread running req->loop. */
static void getaddrinfo_do(struct getaddrinfo_req* req) {
  int r;

  r = uv_getaddrinfo(req->loop,
                     &req->handle,
                     getaddrinfo_cb,
                     "localhost",
                     NULL,
                     NULL);
  ASSERT_OK(r);
}


static void getaddrinfo_cb(uv_getaddrinfo_t* handle,
                           int status,
                           struct addrinfo* res) {
  struct getaddrinfo_req* req;
  uv_thread_t self;

  ASSERT_OK(status);

  req = container_of(handle, struct getaddrinfo_req, handle);

  /* Fix: actually check what the test claims to check (see the comment on
   * threadpool_multiple_event_loops) — the callback must run in the thread
   * that owns the loop. req->thread_id was declared but never initialized
   * or verified. */
  self = uv_thread_self();
  ASSERT(uv_thread_equal(&self, &req->thread_id));

  uv_freeaddrinfo(res);

  /* Re-issue until the per-request counter runs out. */
  if (--req->counter)
    getaddrinfo_do(req);
}


/* Issue (or re-issue) an async stat of the current directory; the result is
 * delivered to fs_cb on the thread running req->loop. */
static void fs_do(struct fs_req* req) {
  int r;

  r = uv_fs_stat(req->loop, &req->handle, ".", fs_cb);
  ASSERT_OK(r);
}


static void fs_cb(uv_fs_t* handle) {
  struct fs_req* req = container_of(handle, struct fs_req, handle);
  uv_thread_t self;

  /* Fix: verify the callback runs in the loop's owning thread; see
   * getaddrinfo_cb. */
  self = uv_thread_self();
  ASSERT(uv_thread_equal(&self, &req->thread_id));

  uv_fs_req_cleanup(handle);

  if (--req->counter)
    fs_do(req);
}


/* Worker body: run a private event loop that drives several chained
 * getaddrinfo and fs requests, then mark the owning test_thread done.
 * `arg` is the struct test_thread for this worker. */
static void do_work(void* arg) {
  struct getaddrinfo_req getaddrinfo_reqs[4];
  struct fs_req fs_reqs[4];
  uv_loop_t loop;
  uv_thread_t self;
  size_t i;
  struct test_thread* thread = arg;

  ASSERT_OK(uv_loop_init(&loop));

  /* Fix: record this thread's identity so the callbacks can assert they run
   * here; thread_id was previously left uninitialized. */
  self = uv_thread_self();

  for (i = 0; i < ARRAY_SIZE(getaddrinfo_reqs); i++) {
    struct getaddrinfo_req* req = getaddrinfo_reqs + i;
    req->thread_id = self;
    req->counter = 4;
    req->loop = &loop;
    getaddrinfo_do(req);
  }

  for (i = 0; i < ARRAY_SIZE(fs_reqs); i++) {
    struct fs_req* req = fs_reqs + i;
    req->thread_id = self;
    req->counter = 4;
    req->loop = &loop;
    fs_do(req);
  }

  ASSERT_OK(uv_run(&loop, UV_RUN_DEFAULT));
  ASSERT_OK(uv_loop_close(&loop));
  thread->thread_called = 1;
}
138
139
140static void thread_entry(void* arg) {
141ASSERT_PTR_EQ(arg, (void *) 42);
142thread_called++;
143}
144
145
146TEST_IMPL(thread_create) {
147uv_thread_t tid;
148int r;
149
150r = uv_thread_create(&tid, thread_entry, (void *) 42);
151ASSERT_OK(r);
152
153r = uv_thread_join(&tid);
154ASSERT_OK(r);
155
156ASSERT_EQ(1, thread_called);
157
158return 0;
159}
160
161
162/* Hilariously bad test name. Run a lot of tasks in the thread pool and verify
163* that each "finished" callback is run in its originating thread.
164*/
165TEST_IMPL(threadpool_multiple_event_loops) {
166/* TODO(gengjiawen): Fix test on QEMU. */
167#if defined(__QEMU__)
168RETURN_SKIP("Test does not currently work in QEMU");
169#endif
170
171struct test_thread threads[8];
172size_t i;
173int r;
174
175memset(threads, 0, sizeof(threads));
176
177for (i = 0; i < ARRAY_SIZE(threads); i++) {
178r = uv_thread_create(&threads[i].thread_id, do_work, &threads[i]);
179ASSERT_OK(r);
180}
181
182for (i = 0; i < ARRAY_SIZE(threads); i++) {
183r = uv_thread_join(&threads[i].thread_id);
184ASSERT_OK(r);
185ASSERT_EQ(1, threads[i].thread_called);
186}
187
188return 0;
189}
190
191
192static void tls_thread(void* arg) {
193ASSERT_NULL(uv_key_get(&tls_key));
194uv_key_set(&tls_key, arg);
195ASSERT_PTR_EQ(arg, uv_key_get(&tls_key));
196uv_key_set(&tls_key, NULL);
197ASSERT_NULL(uv_key_get(&tls_key));
198}
199
200
201TEST_IMPL(thread_local_storage) {
202char name[] = "main";
203uv_thread_t threads[2];
204ASSERT_OK(uv_key_create(&tls_key));
205ASSERT_NULL(uv_key_get(&tls_key));
206uv_key_set(&tls_key, name);
207ASSERT_PTR_EQ(name, uv_key_get(&tls_key));
208ASSERT_OK(uv_thread_create(threads + 0, tls_thread, threads + 0));
209ASSERT_OK(uv_thread_create(threads + 1, tls_thread, threads + 1));
210ASSERT_OK(uv_thread_join(threads + 0));
211ASSERT_OK(uv_thread_join(threads + 1));
212uv_key_delete(&tls_key);
213return 0;
214}
215
216
/* Thread body: assert the calling thread's stack is at least as large as
 * requested. `arg` is NULL (no explicit request) or a uv_thread_options_t
 * whose stack_size holds the requested byte count. Platforms without a
 * stack-introspection API compile this to a no-op. */
static void thread_check_stack(void* arg) {
#if defined(__APPLE__)
  size_t expected;
  expected = arg == NULL ? 0 : ((uv_thread_options_t*)arg)->stack_size;
  /* 512 kB is the default stack size of threads other than the main thread
   * on MacOS. */
  if (expected == 0)
    expected = 512 * 1024;
  ASSERT_GE(pthread_get_stacksize_np(pthread_self()), expected);
#elif defined(__linux__) && defined(__GLIBC__)
  size_t expected;
  struct rlimit lim;
  size_t stack_size;
  pthread_attr_t attr;
  /* With no explicit request, glibc sizes new thread stacks from
   * RLIMIT_STACK; RLIM_INFINITY falls back to the 2 MB glibc default. */
  ASSERT_OK(getrlimit(RLIMIT_STACK, &lim));
  if (lim.rlim_cur == RLIM_INFINITY)
    lim.rlim_cur = 2 << 20; /* glibc default. */
  /* Read the actual stack size of this thread from its pthread attrs. */
  ASSERT_OK(pthread_getattr_np(pthread_self(), &attr));
  ASSERT_OK(pthread_attr_getstacksize(&attr, &stack_size));
  expected = arg == NULL ? 0 : ((uv_thread_options_t*)arg)->stack_size;
  if (expected == 0)
    expected = (size_t)lim.rlim_cur;
  ASSERT_GE(stack_size, expected);
  ASSERT_OK(pthread_attr_destroy(&attr));
#endif
}
243
244
245TEST_IMPL(thread_stack_size) {
246uv_thread_t thread;
247ASSERT_OK(uv_thread_create(&thread, thread_check_stack, NULL));
248ASSERT_OK(uv_thread_join(&thread));
249return 0;
250}
251
252TEST_IMPL(thread_stack_size_explicit) {
253uv_thread_t thread;
254uv_thread_options_t options;
255
256options.flags = UV_THREAD_HAS_STACK_SIZE;
257options.stack_size = 1024 * 1024;
258ASSERT_OK(uv_thread_create_ex(&thread, &options,
259thread_check_stack, &options));
260ASSERT_OK(uv_thread_join(&thread));
261
262options.stack_size = 8 * 1024 * 1024; /* larger than most default os sizes */
263ASSERT_OK(uv_thread_create_ex(&thread, &options,
264thread_check_stack, &options));
265ASSERT_OK(uv_thread_join(&thread));
266
267options.stack_size = 0;
268ASSERT_OK(uv_thread_create_ex(&thread, &options,
269thread_check_stack, &options));
270ASSERT_OK(uv_thread_join(&thread));
271
272options.stack_size = 42;
273ASSERT_OK(uv_thread_create_ex(&thread, &options,
274thread_check_stack, &options));
275ASSERT_OK(uv_thread_join(&thread));
276
277#ifdef PTHREAD_STACK_MIN
278options.stack_size = PTHREAD_STACK_MIN - 42; /* unaligned size */
279ASSERT_OK(uv_thread_create_ex(&thread, &options,
280thread_check_stack, &options));
281ASSERT_OK(uv_thread_join(&thread));
282
283options.stack_size = PTHREAD_STACK_MIN / 2 - 42; /* unaligned size */
284ASSERT_OK(uv_thread_create_ex(&thread, &options,
285thread_check_stack, &options));
286ASSERT_OK(uv_thread_join(&thread));
287#endif
288
289/* unaligned size, should be larger than PTHREAD_STACK_MIN */
290options.stack_size = 1234567;
291ASSERT_OK(uv_thread_create_ex(&thread, &options,
292thread_check_stack, &options));
293ASSERT_OK(uv_thread_join(&thread));
294
295return 0;
296}
297