/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "uv-common.h"

#if !defined(_WIN32)
# include "unix/internal.h"
#endif

#include <stdlib.h>

#define MAX_THREADPOOL_SIZE 128

static uv_once_t once = UV_ONCE_INIT;
static uv_cond_t cond;
static uv_mutex_t mutex;
static unsigned int idle_threads;
static unsigned int nthreads;
static uv_thread_t* threads;
static uv_thread_t default_threads[4];
static QUEUE exit_message;
static QUEUE wq;


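/* Placeholder work callback installed by uv__work_cancel(). It must never
 * actually run; uv__work_done() only compares against its address to detect
 * a cancelled request, so reaching this function means the request state
 * machine is broken.
 */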
static void uv__cancelled(struct uv__work* w) {
  abort();
}


/* To avoid deadlock with uv_cancel() it's crucial that the worker
 * never holds the global mutex and the loop-local mutex at the same time.
 */
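/* Each worker posts the startup semaphore passed in via `arg` so that
 * init_threads() can wait until the whole pool is running. When a worker
 * dequeues `exit_message`, it leaves it in the queue and re-signals the
 * condition variable so its sibling threads shut down as well.
 */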
static void worker(void* arg) {
  struct uv__work* w;
  QUEUE* q;

  uv_sem_post((uv_sem_t*) arg);
  arg = NULL;

  for (;;) {
    uv_mutex_lock(&mutex);

    while (QUEUE_EMPTY(&wq)) {
      idle_threads += 1;
      uv_cond_wait(&cond, &mutex);
      idle_threads -= 1;
    }

    q = QUEUE_HEAD(&wq);

    if (q == &exit_message)
      uv_cond_signal(&cond);
    else {
      QUEUE_REMOVE(q);
      QUEUE_INIT(q);  /* Signal uv_cancel() that the work req is
                         executing. */
    }

    uv_mutex_unlock(&mutex);

    if (q == &exit_message)
      break;

    w = QUEUE_DATA(q, struct uv__work, wq);
    w->work(w);

    uv_mutex_lock(&w->loop->wq_mutex);
    w->work = NULL;  /* Signal uv_cancel() that the work req is done
                        executing. */
    QUEUE_INSERT_TAIL(&w->loop->wq, &w->wq);
    uv_async_send(&w->loop->wq_async);
    uv_mutex_unlock(&w->loop->wq_mutex);
  }
}


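/* Append a work item to the global queue and wake one idle worker, if any.
 * If every worker is busy the item simply stays queued; a worker will pick
 * it up on its next loop iteration.
 */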
static void post(QUEUE* q) {
  uv_mutex_lock(&mutex);
  QUEUE_INSERT_TAIL(&wq, q);
  if (idle_threads > 0)
    uv_cond_signal(&cond);
  uv_mutex_unlock(&mutex);
}


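/* Process-exit destructor (non-Windows only): posts the exit message, joins
 * every worker thread, and releases the pool's resources.
 */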
#ifndef _WIN32
UV_DESTRUCTOR(static void cleanup(void)) {
  unsigned int i;

  if (nthreads == 0)
    return;

  post(&exit_message);

  for (i = 0; i < nthreads; i++)
    if (uv_thread_join(threads + i))
      abort();

  if (threads != default_threads)
    uv__free(threads);

  uv_mutex_destroy(&mutex);
  uv_cond_destroy(&cond);

  threads = NULL;
  nthreads = 0;
}
#endif


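/* Start the worker pool. The size defaults to ARRAY_SIZE(default_threads)
 * (i.e. 4) and can be overridden with the UV_THREADPOOL_SIZE environment
 * variable, clamped to [1, MAX_THREADPOOL_SIZE]. For example, a user might
 * launch an application with:
 *
 *   $ UV_THREADPOOL_SIZE=8 ./app
 *
 * If allocating a larger thread array fails, we fall back to the static
 * default array rather than aborting.
 */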
static void init_threads(void) {
  unsigned int i;
  const char* val;
  uv_sem_t sem;

  nthreads = ARRAY_SIZE(default_threads);
  val = getenv("UV_THREADPOOL_SIZE");
  if (val != NULL)
    nthreads = atoi(val);
  if (nthreads == 0)
    nthreads = 1;
  if (nthreads > MAX_THREADPOOL_SIZE)
    nthreads = MAX_THREADPOOL_SIZE;

  threads = default_threads;
  if (nthreads > ARRAY_SIZE(default_threads)) {
    threads = (uv_thread_t*) uv__malloc(nthreads * sizeof(threads[0]));
    if (threads == NULL) {
      nthreads = ARRAY_SIZE(default_threads);
      threads = default_threads;
    }
  }

  if (uv_cond_init(&cond))
    abort();

  if (uv_mutex_init(&mutex))
    abort();

  QUEUE_INIT(&wq);

  if (uv_sem_init(&sem, 0))
    abort();

  for (i = 0; i < nthreads; i++)
    if (uv_thread_create(threads + i, worker, &sem))
      abort();

  for (i = 0; i < nthreads; i++)
    uv_sem_wait(&sem);

  uv_sem_destroy(&sem);
}


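/* Runs in the child process after fork(): resets `once` to its initial
 * state so that the next uv__work_submit() call reinitializes the pool,
 * since the child does not inherit the parent's worker threads.
 */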
#ifndef _WIN32
static void reset_once(void) {
  uv_once_t child_once = UV_ONCE_INIT;
  memcpy(&once, &child_once, sizeof(child_once));
}
#endif


static void init_once(void) {
#ifndef _WIN32
  /* Re-initialize the threadpool after fork.
   * Note that this discards the global mutex and condition as well
   * as the work queue.
   */
  if (pthread_atfork(NULL, NULL, &reset_once))
    abort();
#endif
  init_threads();
}


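/* Internal submission entry point (see the uv_cancel() switch below for the
 * request types that use it): lazily initializes the pool, records the work
 * and done callbacks on the request, and posts it to the global queue.
 */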
void uv__work_submit(uv_loop_t* loop,
                     struct uv__work* w,
                     void (*work)(struct uv__work* w),
                     void (*done)(struct uv__work* w, int status)) {
  uv_once(&once, init_once);
  w->loop = loop;
  w->work = work;
  w->done = done;
  post(&w->wq);
}


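/* A request is cancellable only while it still sits in the global queue:
 * the worker empties w->wq (QUEUE_INIT) when it starts executing a request
 * and clears w->work when it finishes, so if either condition fails we are
 * too late and report UV_EBUSY. On success, the request is rerouted to the
 * loop's done queue with w->work set to the uv__cancelled sentinel.
 */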
static int uv__work_cancel(uv_loop_t* loop, uv_req_t* req, struct uv__work* w) {
  int cancelled;

  uv_mutex_lock(&mutex);
  uv_mutex_lock(&w->loop->wq_mutex);

  cancelled = !QUEUE_EMPTY(&w->wq) && w->work != NULL;
  if (cancelled)
    QUEUE_REMOVE(&w->wq);

  uv_mutex_unlock(&w->loop->wq_mutex);
  uv_mutex_unlock(&mutex);

  if (!cancelled)
    return UV_EBUSY;

  w->work = uv__cancelled;
  uv_mutex_lock(&loop->wq_mutex);
  QUEUE_INSERT_TAIL(&loop->wq, &w->wq);
  uv_async_send(&loop->wq_async);
  uv_mutex_unlock(&loop->wq_mutex);

  return 0;
}


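/* Runs on the loop thread in response to uv_async_send() from a worker (or
 * from uv__work_cancel). Moves the loop's finished-work queue aside under
 * the lock, then invokes each done callback, passing UV_ECANCELED for
 * requests that were cancelled before a worker picked them up.
 */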
void uv__work_done(uv_async_t* handle) {
  struct uv__work* w;
  uv_loop_t* loop;
  QUEUE* q;
  QUEUE wq;
  int err;

  loop = container_of(handle, uv_loop_t, wq_async);
  uv_mutex_lock(&loop->wq_mutex);
  QUEUE_MOVE(&loop->wq, &wq);
  uv_mutex_unlock(&loop->wq_mutex);

  while (!QUEUE_EMPTY(&wq)) {
    q = QUEUE_HEAD(&wq);
    QUEUE_REMOVE(q);

    w = container_of(q, struct uv__work, wq);
    err = (w->work == uv__cancelled) ? UV_ECANCELED : 0;
    w->done(w, err);
  }
}


static void uv__queue_work(struct uv__work* w) {
  uv_work_t* req = container_of(w, uv_work_t, work_req);

  req->work_cb(req);
}


static void uv__queue_done(struct uv__work* w, int err) {
  uv_work_t* req;

  req = container_of(w, uv_work_t, work_req);
  uv__req_unregister(req->loop, req);

  if (req->after_work_cb == NULL)
    return;

  req->after_work_cb(req, err);
}


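/* Public API. Illustrative caller sketch (not part of this file; the
 * callback names are hypothetical):
 *
 *   void on_work(uv_work_t* req) {
 *     // Runs on a threadpool thread; do the blocking work here.
 *   }
 *
 *   void on_after(uv_work_t* req, int status) {
 *     // Runs on the loop thread; status is UV_ECANCELED if cancelled.
 *   }
 *
 *   uv_work_t req;
 *   uv_queue_work(uv_default_loop(), &req, on_work, on_after);
 */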
int uv_queue_work(uv_loop_t* loop,
                  uv_work_t* req,
                  uv_work_cb work_cb,
                  uv_after_work_cb after_work_cb) {
  if (work_cb == NULL)
    return UV_EINVAL;

  uv__req_init(loop, req, UV_WORK);
  req->loop = loop;
  req->work_cb = work_cb;
  req->after_work_cb = after_work_cb;
  uv__work_submit(loop, &req->work_req, uv__queue_work, uv__queue_done);
  return 0;
}


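/* Attempt to cancel a pending request. Only the request types that go
 * through the threadpool (fs, getaddrinfo, getnameinfo and work requests)
 * are supported; anything else yields UV_EINVAL, and a request that is
 * already running or finished yields UV_EBUSY.
 */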
int uv_cancel(uv_req_t* req) {
  struct uv__work* wreq;
  uv_loop_t* loop;

  switch (req->type) {
  case UV_FS:
    loop = ((uv_fs_t*) req)->loop;
    wreq = &((uv_fs_t*) req)->work_req;
    break;
  case UV_GETADDRINFO:
    loop = ((uv_getaddrinfo_t*) req)->loop;
    wreq = &((uv_getaddrinfo_t*) req)->work_req;
    break;
  case UV_GETNAMEINFO:
    loop = ((uv_getnameinfo_t*) req)->loop;
    wreq = &((uv_getnameinfo_t*) req)->work_req;
    break;
  case UV_WORK:
    loop = ((uv_work_t*) req)->loop;
    wreq = &((uv_work_t*) req)->work_req;
    break;
  default:
    return UV_EINVAL;
  }

  return uv__work_cancel(loop, req, wreq);
}