/* libuv-svace-build — 1674 lines · 49.0 KB */
1/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
2*
3* Permission is hereby granted, free of charge, to any person obtaining a copy
4* of this software and associated documentation files (the "Software"), to
5* deal in the Software without restriction, including without limitation the
6* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
7* sell copies of the Software, and to permit persons to whom the Software is
8* furnished to do so, subject to the following conditions:
9*
10* The above copyright notice and this permission notice shall be included in
11* all copies or substantial portions of the Software.
12*
13* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
18* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
19* IN THE SOFTWARE.
20*/
21
22#include <assert.h>
23#include <stdlib.h>
24
25#include "uv.h"
26#include "internal.h"
27#include "handle-inl.h"
28#include "stream-inl.h"
29#include "req-inl.h"
30
31
32/*
33* Number of simultaneous pending AcceptEx calls.
34*/
const unsigned int uv_simultaneous_server_accepts = 32;

/* A zero-size buffer for use by uv_tcp_read; its address is handed to
 * WSARecv for zero-length "readability" reads (see uv__tcp_queue_read). */
static char uv_zero_[] = "";
39
40static int uv__tcp_nodelay(uv_tcp_t* handle, SOCKET socket, int enable) {
41if (setsockopt(socket,
42IPPROTO_TCP,
43TCP_NODELAY,
44(const char*)&enable,
45sizeof enable) == -1) {
46return WSAGetLastError();
47}
48return 0;
49}
50
51
52static int uv__tcp_keepalive(uv_tcp_t* handle, SOCKET socket, int enable, unsigned int delay) {
53if (setsockopt(socket,
54SOL_SOCKET,
55SO_KEEPALIVE,
56(const char*)&enable,
57sizeof enable) == -1) {
58return WSAGetLastError();
59}
60
61if (!enable)
62return 0;
63
64if (delay < 1)
65return UV_EINVAL;
66
67if (setsockopt(socket,
68IPPROTO_TCP,
69TCP_KEEPALIVE,
70(const char*)&delay,
71sizeof delay) == -1) {
72return WSAGetLastError();
73}
74
75return 0;
76}
77
78
/* Take ownership of |socket| for |handle|: make it nonblocking and
 * non-inheritable, associate it with the loop's IOCP, and re-apply any
 * nodelay/keepalive settings recorded in handle->flags.
 * Returns 0 on success, UV_EBUSY if the handle already owns a socket, or a
 * Windows/Winsock error code. */
static int uv__tcp_set_socket(uv_loop_t* loop,
                              uv_tcp_t* handle,
                              SOCKET socket,
                              int family,
                              int imported) {
  DWORD yes = 1;
  int non_ifs_lsp;
  int err;

  if (handle->socket != INVALID_SOCKET)
    return UV_EBUSY;

  /* Set the socket to nonblocking mode */
  if (ioctlsocket(socket, FIONBIO, &yes) == SOCKET_ERROR) {
    return WSAGetLastError();
  }

  /* Make the socket non-inheritable */
  if (!SetHandleInformation((HANDLE) socket, HANDLE_FLAG_INHERIT, 0))
    return GetLastError();

  /* Associate it with the I/O completion port. The socket itself is used as
   * the completion key. */
  if (CreateIoCompletionPort((HANDLE)socket,
                             loop->iocp,
                             (ULONG_PTR)socket,
                             0) == NULL) {
    if (imported) {
      /* An imported socket may already belong to another completion port;
       * fall back to emulating IOCP with events and wait callbacks. */
      handle->flags |= UV_HANDLE_EMULATE_IOCP;
    } else {
      return GetLastError();
    }
  }

  if (family == AF_INET6) {
    non_ifs_lsp = uv_tcp_non_ifs_lsp_ipv6;
  } else {
    non_ifs_lsp = uv_tcp_non_ifs_lsp_ipv4;
  }

  /* Skip IOCP notifications for synchronously completing operations, unless
   * a non-IFS layered service provider is installed (it could break the
   * inline-completion contract) or we are emulating IOCP. */
  if (!(handle->flags & UV_HANDLE_EMULATE_IOCP) && !non_ifs_lsp) {
    UCHAR sfcnm_flags =
        FILE_SKIP_SET_EVENT_ON_HANDLE | FILE_SKIP_COMPLETION_PORT_ON_SUCCESS;
    if (!SetFileCompletionNotificationModes((HANDLE) socket, sfcnm_flags))
      return GetLastError();
    handle->flags |= UV_HANDLE_SYNC_BYPASS_IOCP;
  }

  if (handle->flags & UV_HANDLE_TCP_NODELAY) {
    err = uv__tcp_nodelay(handle, socket, 1);
    if (err)
      return err;
  }

  /* TODO: Use stored delay. */
  if (handle->flags & UV_HANDLE_TCP_KEEPALIVE) {
    err = uv__tcp_keepalive(handle, socket, 1, 60);
    if (err)
      return err;
  }

  handle->socket = socket;

  if (family == AF_INET6) {
    handle->flags |= UV_HANDLE_IPV6;
  } else {
    assert(!(handle->flags & UV_HANDLE_IPV6));
  }

  return 0;
}
150
151
/* Initialize a TCP handle. The low 8 bits of |flags| select the address
 * family (AF_INET, AF_INET6 or AF_UNSPEC); all remaining bits must be zero.
 * For a concrete family the socket is created eagerly here; for AF_UNSPEC
 * socket creation is deferred until bind/connect time. */
int uv_tcp_init_ex(uv_loop_t* loop, uv_tcp_t* handle, unsigned int flags) {
  int domain;

  /* Use the lower 8 bits for the domain */
  domain = flags & 0xFF;
  if (domain != AF_INET && domain != AF_INET6 && domain != AF_UNSPEC)
    return UV_EINVAL;

  if (flags & ~0xFF)
    return UV_EINVAL;

  uv__stream_init(loop, (uv_stream_t*) handle, UV_TCP);
  handle->tcp.serv.accept_reqs = NULL;
  handle->tcp.serv.pending_accepts = NULL;
  handle->socket = INVALID_SOCKET;
  handle->reqs_pending = 0;
  handle->tcp.serv.func_acceptex = NULL;
  handle->tcp.conn.func_connectex = NULL;
  handle->tcp.serv.processed_accepts = 0;
  handle->delayed_error = 0;

  /* If anything fails beyond this point we need to remove the handle from
   * the handle queue, since it was added by uv__handle_init in uv__stream_init.
   */

  if (domain != AF_UNSPEC) {
    SOCKET sock;
    DWORD err;

    sock = socket(domain, SOCK_STREAM, 0);
    if (sock == INVALID_SOCKET) {
      err = WSAGetLastError();
      uv__queue_remove(&handle->handle_queue);
      return uv_translate_sys_error(err);
    }

    err = uv__tcp_set_socket(handle->loop, handle, sock, domain, 0);
    if (err) {
      closesocket(sock);
      uv__queue_remove(&handle->handle_queue);
      return uv_translate_sys_error(err);
    }

  }

  return 0;
}
199
200
201int uv_tcp_init(uv_loop_t* loop, uv_tcp_t* handle) {
202return uv_tcp_init_ex(loop, handle, AF_UNSPEC);
203}
204
205
/* Complete a pending shutdown request once all queued writes have drained
 * (caller guarantees write_reqs_pending == 0). Performs shutdown(SD_SEND)
 * unless the handle is already closing, then reports the outcome through
 * the request callback. */
void uv__process_tcp_shutdown_req(uv_loop_t* loop, uv_tcp_t* stream, uv_shutdown_t *req) {
  int err;

  assert(req);
  assert(stream->stream.conn.write_reqs_pending == 0);
  assert(!(stream->flags & UV_HANDLE_SHUT));
  assert(stream->flags & UV_HANDLE_CONNECTION);

  stream->stream.conn.shutdown_req = NULL;
  UNREGISTER_HANDLE_REQ(loop, stream, req);

  err = 0;
  if (stream->flags & UV_HANDLE_CLOSING)
    /* The user destroyed the stream before we got to do the shutdown. */
    err = UV_ECANCELED;
  else if (shutdown(stream->socket, SD_SEND) == SOCKET_ERROR)
    err = uv_translate_sys_error(WSAGetLastError());
  else /* Success. */
    stream->flags |= UV_HANDLE_SHUT;

  if (req->cb)
    req->cb(req, err);

  DECREASE_PENDING_REQ_COUNT(stream);
}
231
232
/* Final teardown of a closing TCP handle. Runs only after the socket has
 * been closed and every outstanding request has completed. Releases the
 * server accept-request array and any IOCP-emulation wait/event handles,
 * then marks the handle closed. */
void uv__tcp_endgame(uv_loop_t* loop, uv_tcp_t* handle) {
  unsigned int i;
  uv_tcp_accept_t* req;

  assert(handle->flags & UV_HANDLE_CLOSING);
  assert(handle->reqs_pending == 0);
  assert(!(handle->flags & UV_HANDLE_CLOSED));
  assert(handle->socket == INVALID_SOCKET);

  if (!(handle->flags & UV_HANDLE_CONNECTION) && handle->tcp.serv.accept_reqs) {
    if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
      /* uv__tcp_listen initializes all uv_simultaneous_server_accepts slots
       * (even unused ones), so sweeping the whole array is safe. */
      for (i = 0; i < uv_simultaneous_server_accepts; i++) {
        req = &handle->tcp.serv.accept_reqs[i];
        if (req->wait_handle != INVALID_HANDLE_VALUE) {
          UnregisterWait(req->wait_handle);
          req->wait_handle = INVALID_HANDLE_VALUE;
        }
        if (req->event_handle != NULL) {
          CloseHandle(req->event_handle);
          req->event_handle = NULL;
        }
      }
    }

    uv__free(handle->tcp.serv.accept_reqs);
    handle->tcp.serv.accept_reqs = NULL;
  }

  /* Connection handles keep their emulation handles on read_req instead. */
  if (handle->flags & UV_HANDLE_CONNECTION &&
      handle->flags & UV_HANDLE_EMULATE_IOCP) {
    if (handle->read_req.wait_handle != INVALID_HANDLE_VALUE) {
      UnregisterWait(handle->read_req.wait_handle);
      handle->read_req.wait_handle = INVALID_HANDLE_VALUE;
    }
    if (handle->read_req.event_handle != NULL) {
      CloseHandle(handle->read_req.event_handle);
      handle->read_req.event_handle = NULL;
    }
  }

  uv__handle_close(handle);
}
275
276
277/* Unlike on Unix, here we don't set SO_REUSEADDR, because it doesn't just
278* allow binding to addresses that are in use by sockets in TIME_WAIT, it
279* effectively allows 'stealing' a port which is in use by another application.
280*
281* SO_EXCLUSIVEADDRUSE is also not good here because it does check all sockets,
282* regardless of state, so we'd get an error even if the port is in use by a
283* socket in TIME_WAIT state.
284*
285* See issue #1360.
286*
287*/
/* Bind the handle's socket to |addr|, creating the socket lazily if the
 * handle does not own one yet. Returns 0 or a Windows error code.
 * WSAEADDRINUSE is not reported here: it is stashed in
 * handle->delayed_error and surfaced later by listen() or connect(). */
static int uv__tcp_try_bind(uv_tcp_t* handle,
                            const struct sockaddr* addr,
                            unsigned int addrlen,
                            unsigned int flags) {
  DWORD err;
  int r;

  /* There is no SO_REUSEPORT on Windows, Windows only knows SO_REUSEADDR.
   * so we just return an error directly when UV_TCP_REUSEPORT is requested
   * for binding the socket. */
  if (flags & UV_TCP_REUSEPORT)
    return ERROR_NOT_SUPPORTED;

  if (handle->socket == INVALID_SOCKET) {
    SOCKET sock;

    /* Cannot set IPv6-only mode on non-IPv6 socket. */
    if ((flags & UV_TCP_IPV6ONLY) && addr->sa_family != AF_INET6)
      return ERROR_INVALID_PARAMETER;

    sock = socket(addr->sa_family, SOCK_STREAM, 0);
    if (sock == INVALID_SOCKET) {
      return WSAGetLastError();
    }

    err = uv__tcp_set_socket(handle->loop, handle, sock, addr->sa_family, 0);
    if (err) {
      closesocket(sock);
      return err;
    }
  }

#ifdef IPV6_V6ONLY
  if (addr->sa_family == AF_INET6) {
    int on;

    on = (flags & UV_TCP_IPV6ONLY) != 0;

    /* TODO: how to handle errors? This may fail if there is no ipv4 stack
     * available, or when run on XP/2003 which have no support for dualstack
     * sockets. For now we're silently ignoring the error. */
    setsockopt(handle->socket,
               IPPROTO_IPV6,
               IPV6_V6ONLY,
               (const char*)&on,
               sizeof on);
  }
#endif

  r = bind(handle->socket, addr, addrlen);

  if (r == SOCKET_ERROR) {
    err = WSAGetLastError();
    if (err == WSAEADDRINUSE) {
      /* Some errors are not to be reported until connect() or listen() */
      handle->delayed_error = err;
    } else {
      return err;
    }
  }

  /* The handle counts as bound even when the WSAEADDRINUSE was deferred. */
  handle->flags |= UV_HANDLE_BOUND;

  return 0;
}
353
354
355static void CALLBACK post_completion(void* context, BOOLEAN timed_out) {
356uv_req_t* req;
357uv_tcp_t* handle;
358
359req = (uv_req_t*) context;
360assert(req != NULL);
361handle = (uv_tcp_t*)req->data;
362assert(handle != NULL);
363assert(!timed_out);
364
365if (!PostQueuedCompletionStatus(handle->loop->iocp,
366req->u.io.overlapped.InternalHigh,
3670,
368&req->u.io.overlapped)) {
369uv_fatal_error(GetLastError(), "PostQueuedCompletionStatus");
370}
371}
372
373
374static void CALLBACK post_write_completion(void* context, BOOLEAN timed_out) {
375uv_write_t* req;
376uv_tcp_t* handle;
377
378req = (uv_write_t*) context;
379assert(req != NULL);
380handle = (uv_tcp_t*)req->handle;
381assert(handle != NULL);
382assert(!timed_out);
383
384if (!PostQueuedCompletionStatus(handle->loop->iocp,
385req->u.io.overlapped.InternalHigh,
3860,
387&req->u.io.overlapped)) {
388uv_fatal_error(GetLastError(), "PostQueuedCompletionStatus");
389}
390}
391
392
/* Post one asynchronous AcceptEx on a listening handle using accept slot
 * |req|. On every path — immediate success, kernel-queued, or failure —
 * handle->reqs_pending is incremented exactly once; failures are queued as
 * pending requests carrying the error so they surface through the normal
 * completion machinery. */
static void uv__tcp_queue_accept(uv_tcp_t* handle, uv_tcp_accept_t* req) {
  uv_loop_t* loop = handle->loop;
  BOOL success;
  DWORD bytes;
  SOCKET accept_socket;
  short family;

  assert(handle->flags & UV_HANDLE_LISTENING);
  assert(req->accept_socket == INVALID_SOCKET);

  /* choose family and extension function */
  if (handle->flags & UV_HANDLE_IPV6) {
    family = AF_INET6;
  } else {
    family = AF_INET;
  }

  /* Open a socket for the accepted connection. */
  accept_socket = socket(family, SOCK_STREAM, 0);
  if (accept_socket == INVALID_SOCKET) {
    SET_REQ_ERROR(req, WSAGetLastError());
    uv__insert_pending_req(loop, (uv_req_t*)req);
    handle->reqs_pending++;
    return;
  }

  /* Make the socket non-inheritable */
  if (!SetHandleInformation((HANDLE) accept_socket, HANDLE_FLAG_INHERIT, 0)) {
    SET_REQ_ERROR(req, GetLastError());
    uv__insert_pending_req(loop, (uv_req_t*)req);
    handle->reqs_pending++;
    closesocket(accept_socket);
    return;
  }

  /* Prepare the overlapped structure. */
  memset(&(req->u.io.overlapped), 0, sizeof(req->u.io.overlapped));
  if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
    assert(req->event_handle != NULL);
    /* Setting the event handle's low bit suppresses IOCP notification for
     * this operation; completion is observed through the event instead. */
    req->u.io.overlapped.hEvent = (HANDLE) ((ULONG_PTR) req->event_handle | 1);
  }

  /* dwReceiveDataLength of 0 makes AcceptEx complete as soon as a
   * connection arrives, without waiting for initial data. */
  success = handle->tcp.serv.func_acceptex(handle->socket,
                                           accept_socket,
                                           (void*)req->accept_buffer,
                                           0,
                                           sizeof(struct sockaddr_storage),
                                           sizeof(struct sockaddr_storage),
                                           &bytes,
                                           &req->u.io.overlapped);

  if (UV_SUCCEEDED_WITHOUT_IOCP(success)) {
    /* Process the req without IOCP. */
    req->accept_socket = accept_socket;
    handle->reqs_pending++;
    uv__insert_pending_req(loop, (uv_req_t*)req);
  } else if (UV_SUCCEEDED_WITH_IOCP(success)) {
    /* The req will be processed with IOCP. */
    req->accept_socket = accept_socket;
    handle->reqs_pending++;
    /* In emulation mode, register a wait on the event (once per slot) that
     * reposts the completion to the IOCP via post_completion. */
    if (handle->flags & UV_HANDLE_EMULATE_IOCP &&
        req->wait_handle == INVALID_HANDLE_VALUE &&
        !RegisterWaitForSingleObject(&req->wait_handle,
          req->event_handle, post_completion, (void*) req,
          INFINITE, WT_EXECUTEINWAITTHREAD)) {
      SET_REQ_ERROR(req, GetLastError());
      uv__insert_pending_req(loop, (uv_req_t*)req);
    }
  } else {
    /* Make this req pending reporting an error. */
    SET_REQ_ERROR(req, WSAGetLastError());
    uv__insert_pending_req(loop, (uv_req_t*)req);
    handle->reqs_pending++;
    /* Destroy the preallocated client socket. */
    closesocket(accept_socket);
    /* Destroy the event handle */
    if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
      CloseHandle(req->event_handle);
      req->event_handle = NULL;
    }
  }
}
475
476
477static void uv__tcp_queue_read(uv_loop_t* loop, uv_tcp_t* handle) {
478uv_read_t* req;
479uv_buf_t buf;
480int result;
481DWORD bytes, flags;
482
483assert(handle->flags & UV_HANDLE_READING);
484assert(!(handle->flags & UV_HANDLE_READ_PENDING));
485
486req = &handle->read_req;
487memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped));
488
489handle->flags |= UV_HANDLE_ZERO_READ;
490buf.base = (char*) &uv_zero_;
491buf.len = 0;
492
493/* Prepare the overlapped structure. */
494memset(&(req->u.io.overlapped), 0, sizeof(req->u.io.overlapped));
495if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
496assert(req->event_handle != NULL);
497req->u.io.overlapped.hEvent = (HANDLE) ((ULONG_PTR) req->event_handle | 1);
498}
499
500flags = 0;
501result = WSARecv(handle->socket,
502(WSABUF*)&buf,
5031,
504&bytes,
505&flags,
506&req->u.io.overlapped,
507NULL);
508
509handle->flags |= UV_HANDLE_READ_PENDING;
510handle->reqs_pending++;
511
512if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
513/* Process the req without IOCP. */
514req->u.io.overlapped.InternalHigh = bytes;
515uv__insert_pending_req(loop, (uv_req_t*)req);
516} else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
517/* The req will be processed with IOCP. */
518if (handle->flags & UV_HANDLE_EMULATE_IOCP &&
519req->wait_handle == INVALID_HANDLE_VALUE &&
520!RegisterWaitForSingleObject(&req->wait_handle,
521req->event_handle, post_completion, (void*) req,
522INFINITE, WT_EXECUTEINWAITTHREAD)) {
523SET_REQ_ERROR(req, GetLastError());
524uv__insert_pending_req(loop, (uv_req_t*)req);
525}
526} else {
527/* Make this req pending reporting an error. */
528SET_REQ_ERROR(req, WSAGetLastError());
529uv__insert_pending_req(loop, (uv_req_t*)req);
530}
531}
532
533
534int uv_tcp_close_reset(uv_tcp_t* handle, uv_close_cb close_cb) {
535struct linger l = { 1, 0 };
536
537/* Disallow setting SO_LINGER to zero due to some platform inconsistencies */
538if (uv__is_stream_shutting(handle))
539return UV_EINVAL;
540
541if (0 != setsockopt(handle->socket, SOL_SOCKET, SO_LINGER, (const char*)&l, sizeof(l)))
542return uv_translate_sys_error(WSAGetLastError());
543
544uv_close((uv_handle_t*) handle, close_cb);
545return 0;
546}
547
548
/* Start listening on the handle: bind implicitly to 0.0.0.0:0 if unbound,
 * resolve AcceptEx, call listen(), then allocate and queue the AcceptEx
 * request slots. Returns 0 or a Winsock error code (note: not a UV_ code —
 * callers translate). Errors deferred by bind (WSAEADDRINUSE) are reported
 * here via handle->delayed_error. */
int uv__tcp_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb) {
  unsigned int i, simultaneous_accepts;
  uv_tcp_accept_t* req;
  int err;

  assert(backlog > 0);

  /* Already listening: only refresh the connection callback. */
  if (handle->flags & UV_HANDLE_LISTENING) {
    handle->stream.serv.connection_cb = cb;
  }

  if (handle->flags & UV_HANDLE_READING) {
    return WSAEISCONN;
  }

  if (handle->delayed_error) {
    return handle->delayed_error;
  }

  if (!(handle->flags & UV_HANDLE_BOUND)) {
    err = uv__tcp_try_bind(handle,
                           (const struct sockaddr*) &uv_addr_ip4_any_,
                           sizeof(uv_addr_ip4_any_),
                           0);
    if (err)
      return err;
    if (handle->delayed_error)
      return handle->delayed_error;
  }

  if (!handle->tcp.serv.func_acceptex) {
    if (!uv__get_acceptex_function(handle->socket, &handle->tcp.serv.func_acceptex)) {
      return WSAEAFNOSUPPORT;
    }
  }

  /* If this flag is set, we already made this listen call in xfer. */
  if (!(handle->flags & UV_HANDLE_SHARED_TCP_SOCKET) &&
      listen(handle->socket, backlog) == SOCKET_ERROR) {
    return WSAGetLastError();
  }

  handle->flags |= UV_HANDLE_LISTENING;
  handle->stream.serv.connection_cb = cb;
  INCREASE_ACTIVE_COUNT(loop, handle);

  /* UV_HANDLE_TCP_SINGLE_ACCEPT restricts the handle to one outstanding
   * AcceptEx at a time. */
  simultaneous_accepts = handle->flags & UV_HANDLE_TCP_SINGLE_ACCEPT ? 1
    : uv_simultaneous_server_accepts;

  if (handle->tcp.serv.accept_reqs == NULL) {
    /* Always allocate the full array, regardless of simultaneous_accepts;
     * see the cleanup note below. */
    handle->tcp.serv.accept_reqs =
      uv__malloc(uv_simultaneous_server_accepts * sizeof(uv_tcp_accept_t));
    if (!handle->tcp.serv.accept_reqs) {
      uv_fatal_error(ERROR_OUTOFMEMORY, "uv__malloc");
    }

    for (i = 0; i < simultaneous_accepts; i++) {
      req = &handle->tcp.serv.accept_reqs[i];
      UV_REQ_INIT(req, UV_ACCEPT);
      req->accept_socket = INVALID_SOCKET;
      req->data = handle;

      req->wait_handle = INVALID_HANDLE_VALUE;
      if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
        req->event_handle = CreateEvent(NULL, 0, 0, NULL);
        if (req->event_handle == NULL) {
          uv_fatal_error(GetLastError(), "CreateEvent");
        }
      } else {
        req->event_handle = NULL;
      }

      uv__tcp_queue_accept(handle, req);
    }

    /* Initialize other unused requests too, because uv_tcp_endgame doesn't
     * know how many requests were initialized, so it will try to clean up
     * {uv_simultaneous_server_accepts} requests. */
    for (i = simultaneous_accepts; i < uv_simultaneous_server_accepts; i++) {
      req = &handle->tcp.serv.accept_reqs[i];
      UV_REQ_INIT(req, UV_ACCEPT);
      req->accept_socket = INVALID_SOCKET;
      req->data = handle;
      req->wait_handle = INVALID_HANDLE_VALUE;
      req->event_handle = NULL;
    }
  }

  return 0;
}
639
640
/* Hand the oldest pending accepted connection to |client| and re-arm the
 * accept slot. Returns WSAEWOULDBLOCK when nothing is pending, WSAENOTCONN
 * when the pending slot carries no valid socket, otherwise the result of
 * adopting the socket into |client|. */
int uv__tcp_accept(uv_tcp_t* server, uv_tcp_t* client) {
  int err = 0;
  int family;

  uv_tcp_accept_t* req = server->tcp.serv.pending_accepts;

  if (!req) {
    /* No valid connections found, so we error out. */
    return WSAEWOULDBLOCK;
  }

  if (req->accept_socket == INVALID_SOCKET) {
    return WSAENOTCONN;
  }

  if (server->flags & UV_HANDLE_IPV6) {
    family = AF_INET6;
  } else {
    family = AF_INET;
  }

  err = uv__tcp_set_socket(client->loop,
                           client,
                           req->accept_socket,
                           family,
                           0);
  if (err) {
    closesocket(req->accept_socket);
  } else {
    uv__connection_init((uv_stream_t*) client);
    /* AcceptEx() implicitly binds the accepted socket. */
    client->flags |= UV_HANDLE_BOUND | UV_HANDLE_READABLE | UV_HANDLE_WRITABLE;
  }

  /* Prepare the req to pick up a new connection */
  server->tcp.serv.pending_accepts = req->next_pending;
  req->next_pending = NULL;
  req->accept_socket = INVALID_SOCKET;

  if (!(server->flags & UV_HANDLE_CLOSING)) {
    /* Check if we're in a middle of changing the number of pending accepts. */
    if (!(server->flags & UV_HANDLE_TCP_ACCEPT_STATE_CHANGING)) {
      uv__tcp_queue_accept(server, req);
    } else {
      /* We better be switching to a single pending accept. */
      assert(server->flags & UV_HANDLE_TCP_SINGLE_ACCEPT);

      server->tcp.serv.processed_accepts++;

      if (server->tcp.serv.processed_accepts >= uv_simultaneous_server_accepts) {
        server->tcp.serv.processed_accepts = 0;
        /*
         * All previously queued accept requests are now processed.
         * We now switch to queueing just a single accept.
         */
        uv__tcp_queue_accept(server, &server->tcp.serv.accept_reqs[0]);
        server->flags &= ~UV_HANDLE_TCP_ACCEPT_STATE_CHANGING;
        server->flags |= UV_HANDLE_TCP_SINGLE_ACCEPT;
      }
    }
  }

  return err;
}
705
706
/* Begin delivering data: record the callbacks, mark the handle reading, and
 * queue a zero-length read unless one is already outstanding (reading may
 * have been stopped and restarted while a read was still pending). */
int uv__tcp_read_start(uv_tcp_t* handle, uv_alloc_cb alloc_cb,
    uv_read_cb read_cb) {
  uv_loop_t* loop = handle->loop;

  handle->flags |= UV_HANDLE_READING;
  handle->read_cb = read_cb;
  handle->alloc_cb = alloc_cb;
  INCREASE_ACTIVE_COUNT(loop, handle);

  /* If reading was stopped and then started again, there could still be a read
   * request pending. */
  if (!(handle->flags & UV_HANDLE_READ_PENDING)) {
    /* In IOCP-emulation mode the read request needs an event handle;
     * create it lazily on first use. */
    if (handle->flags & UV_HANDLE_EMULATE_IOCP &&
        handle->read_req.event_handle == NULL) {
      handle->read_req.event_handle = CreateEvent(NULL, 0, 0, NULL);
      if (handle->read_req.event_handle == NULL) {
        uv_fatal_error(GetLastError(), "CreateEvent");
      }
    }
    uv__tcp_queue_read(loop, handle);
  }

  return 0;
}
731
732static int uv__is_loopback(const struct sockaddr_storage* storage) {
733const struct sockaddr_in* in4;
734const struct sockaddr_in6* in6;
735int i;
736
737if (storage->ss_family == AF_INET) {
738in4 = (const struct sockaddr_in*) storage;
739return in4->sin_addr.S_un.S_un_b.s_b1 == 127;
740}
741if (storage->ss_family == AF_INET6) {
742in6 = (const struct sockaddr_in6*) storage;
743for (i = 0; i < 7; ++i) {
744if (in6->sin6_addr.u.Word[i] != 0)
745return 0;
746}
747return in6->sin6_addr.u.Word[7] == htons(1);
748}
749return 0;
750}
751
752// Check if Windows version is 10.0.16299 or later
753static int uv__is_fast_loopback_fail_supported(void) {
754OSVERSIONINFOW os_info;
755if (!pRtlGetVersion)
756return 0;
757pRtlGetVersion(&os_info);
758if (os_info.dwMajorVersion < 10)
759return 0;
760if (os_info.dwMajorVersion > 10)
761return 1;
762if (os_info.dwMinorVersion > 0)
763return 1;
764return os_info.dwBuildNumber >= 16299;
765}
766
/* Start an asynchronous connect via ConnectEx. Binds implicitly to the
 * wildcard address first if needed (ConnectEx requires a bound socket).
 * If bind deferred an error (handle->delayed_error), the request is queued
 * as already-completed so the error is reported via the callback rather
 * than returned here. Returns 0 or a Winsock error code. */
static int uv__tcp_try_connect(uv_connect_t* req,
                               uv_tcp_t* handle,
                               const struct sockaddr* addr,
                               unsigned int addrlen,
                               uv_connect_cb cb) {
  uv_loop_t* loop = handle->loop;
  TCP_INITIAL_RTO_PARAMETERS retransmit_ioctl;
  const struct sockaddr* bind_addr;
  struct sockaddr_storage converted;
  BOOL success;
  DWORD bytes;
  int err;

  err = uv__convert_to_localhost_if_unspecified(addr, &converted);
  if (err)
    return err;

  if (handle->delayed_error != 0)
    goto out;

  if (!(handle->flags & UV_HANDLE_BOUND)) {
    /* Pick the wildcard address matching addrlen; any other length is a
     * caller bug, hence abort(). */
    if (addrlen == sizeof(uv_addr_ip4_any_)) {
      bind_addr = (const struct sockaddr*) &uv_addr_ip4_any_;
    } else if (addrlen == sizeof(uv_addr_ip6_any_)) {
      bind_addr = (const struct sockaddr*) &uv_addr_ip6_any_;
    } else {
      abort();
    }
    err = uv__tcp_try_bind(handle, bind_addr, addrlen, 0);
    if (err)
      return err;
    if (handle->delayed_error != 0)
      goto out;
  }

  if (!handle->tcp.conn.func_connectex) {
    if (!uv__get_connectex_function(handle->socket, &handle->tcp.conn.func_connectex)) {
      return WSAEAFNOSUPPORT;
    }
  }

  /* This makes connect() fail instantly if the target port on the localhost
   * is not reachable, instead of waiting for 2s. We do not care if this fails.
   * This only works on Windows version 10.0.16299 and later.
   */
  if (uv__is_fast_loopback_fail_supported() && uv__is_loopback(&converted)) {
    memset(&retransmit_ioctl, 0, sizeof(retransmit_ioctl));
    retransmit_ioctl.Rtt = TCP_INITIAL_RTO_NO_SYN_RETRANSMISSIONS;
    retransmit_ioctl.MaxSynRetransmissions = TCP_INITIAL_RTO_NO_SYN_RETRANSMISSIONS;
    WSAIoctl(handle->socket,
             SIO_TCP_INITIAL_RTO,
             &retransmit_ioctl,
             sizeof(retransmit_ioctl),
             NULL,
             0,
             &bytes,
             NULL,
             NULL);
  }

out:

  UV_REQ_INIT(req, UV_CONNECT);
  req->handle = (uv_stream_t*) handle;
  req->cb = cb;
  memset(&req->u.io.overlapped, 0, sizeof(req->u.io.overlapped));

  if (handle->delayed_error != 0) {
    /* Process the req without IOCP. */
    handle->reqs_pending++;
    REGISTER_HANDLE_REQ(loop, handle, req);
    uv__insert_pending_req(loop, (uv_req_t*)req);
    return 0;
  }

  success = handle->tcp.conn.func_connectex(handle->socket,
                                            (const struct sockaddr*) &converted,
                                            addrlen,
                                            NULL,
                                            0,
                                            &bytes,
                                            &req->u.io.overlapped);

  if (UV_SUCCEEDED_WITHOUT_IOCP(success)) {
    /* Process the req without IOCP. */
    handle->reqs_pending++;
    REGISTER_HANDLE_REQ(loop, handle, req);
    uv__insert_pending_req(loop, (uv_req_t*)req);
  } else if (UV_SUCCEEDED_WITH_IOCP(success)) {
    /* The req will be processed with IOCP. */
    handle->reqs_pending++;
    REGISTER_HANDLE_REQ(loop, handle, req);
  } else {
    return WSAGetLastError();
  }

  return 0;
}
865
866
867int uv_tcp_getsockname(const uv_tcp_t* handle,
868struct sockaddr* name,
869int* namelen) {
870
871return uv__getsockpeername((const uv_handle_t*) handle,
872getsockname,
873name,
874namelen,
875handle->delayed_error);
876}
877
878
879int uv_tcp_getpeername(const uv_tcp_t* handle,
880struct sockaddr* name,
881int* namelen) {
882
883return uv__getsockpeername((const uv_handle_t*) handle,
884getpeername,
885name,
886namelen,
887handle->delayed_error);
888}
889
890
/* Start an asynchronous WSASend of |bufs|. Always returns 0: failures are
 * recorded on the request and reported later through the write callback.
 * On every path reqs_pending and write_reqs_pending are incremented and
 * the request is registered on the handle. */
int uv__tcp_write(uv_loop_t* loop,
                  uv_write_t* req,
                  uv_tcp_t* handle,
                  const uv_buf_t bufs[],
                  unsigned int nbufs,
                  uv_write_cb cb) {
  int result;
  DWORD bytes;

  UV_REQ_INIT(req, UV_WRITE);
  req->handle = (uv_stream_t*) handle;
  req->cb = cb;

  /* Prepare the overlapped structure. */
  memset(&(req->u.io.overlapped), 0, sizeof(req->u.io.overlapped));
  if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
    req->event_handle = CreateEvent(NULL, 0, 0, NULL);
    if (req->event_handle == NULL) {
      uv_fatal_error(GetLastError(), "CreateEvent");
    }
    /* Low bit set on hEvent suppresses IOCP notification; completion is
     * observed via the event and reposted by post_write_completion. */
    req->u.io.overlapped.hEvent = (HANDLE) ((ULONG_PTR) req->event_handle | 1);
    req->wait_handle = INVALID_HANDLE_VALUE;
  }

  result = WSASend(handle->socket,
                   (WSABUF*) bufs,
                   nbufs,
                   &bytes,
                   0,
                   &req->u.io.overlapped,
                   NULL);

  if (UV_SUCCEEDED_WITHOUT_IOCP(result == 0)) {
    /* Request completed immediately. */
    req->u.io.queued_bytes = 0;
    handle->reqs_pending++;
    handle->stream.conn.write_reqs_pending++;
    REGISTER_HANDLE_REQ(loop, handle, req);
    uv__insert_pending_req(loop, (uv_req_t*) req);
  } else if (UV_SUCCEEDED_WITH_IOCP(result == 0)) {
    /* Request queued by the kernel. Track the queued bytes so the write
     * queue size stays accurate until completion. */
    req->u.io.queued_bytes = uv__count_bufs(bufs, nbufs);
    handle->reqs_pending++;
    handle->stream.conn.write_reqs_pending++;
    REGISTER_HANDLE_REQ(loop, handle, req);
    handle->write_queue_size += req->u.io.queued_bytes;
    if (handle->flags & UV_HANDLE_EMULATE_IOCP &&
        !RegisterWaitForSingleObject(&req->wait_handle,
          req->event_handle, post_write_completion, (void*) req,
          INFINITE, WT_EXECUTEINWAITTHREAD | WT_EXECUTEONLYONCE)) {
      SET_REQ_ERROR(req, GetLastError());
      uv__insert_pending_req(loop, (uv_req_t*)req);
    }
  } else {
    /* Send failed due to an error, report it later */
    req->u.io.queued_bytes = 0;
    handle->reqs_pending++;
    handle->stream.conn.write_reqs_pending++;
    REGISTER_HANDLE_REQ(loop, handle, req);
    SET_REQ_ERROR(req, WSAGetLastError());
    uv__insert_pending_req(loop, (uv_req_t*) req);
  }

  return 0;
}
956
957
958int uv__tcp_try_write(uv_tcp_t* handle,
959const uv_buf_t bufs[],
960unsigned int nbufs) {
961int result;
962DWORD bytes;
963
964if (handle->stream.conn.write_reqs_pending > 0)
965return UV_EAGAIN;
966
967result = WSASend(handle->socket,
968(WSABUF*) bufs,
969nbufs,
970&bytes,
9710,
972NULL,
973NULL);
974
975if (result == SOCKET_ERROR)
976return uv_translate_sys_error(WSAGetLastError());
977else
978return bytes;
979}
980
981
/* Completion handler for a read request. For the usual zero-length
 * "readability" read, drains the socket with nonblocking WSARecv calls
 * (up to 32 per pass, to avoid starving the loop), invoking alloc_cb /
 * read_cb for each chunk, then re-queues another zero-length read if the
 * handle is still reading. */
void uv__process_tcp_read_req(uv_loop_t* loop, uv_tcp_t* handle,
    uv_req_t* req) {
  DWORD bytes, flags, err;
  uv_buf_t buf;
  int count;

  assert(handle->type == UV_TCP);

  handle->flags &= ~UV_HANDLE_READ_PENDING;

  if (!REQ_SUCCESS(req)) {
    /* An error occurred doing the read. */
    if ((handle->flags & UV_HANDLE_READING) ||
        !(handle->flags & UV_HANDLE_ZERO_READ)) {
      handle->flags &= ~UV_HANDLE_READING;
      DECREASE_ACTIVE_COUNT(loop, handle);
      buf = (handle->flags & UV_HANDLE_ZERO_READ) ?
            uv_buf_init(NULL, 0) : handle->tcp.conn.read_buffer;

      err = GET_REQ_SOCK_ERROR(req);

      if (err == WSAECONNABORTED) {
        /* Turn WSAECONNABORTED into UV_ECONNRESET to be consistent with Unix.
         */
        err = WSAECONNRESET;
      }
      handle->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);

      handle->read_cb((uv_stream_t*)handle,
                      uv_translate_sys_error(err),
                      &buf);
    }
  } else {
    if (!(handle->flags & UV_HANDLE_ZERO_READ)) {
      /* The read was done with a non-zero buffer length. */
      if (req->u.io.overlapped.InternalHigh > 0) {
        /* Successful read */
        handle->read_cb((uv_stream_t*)handle,
                        req->u.io.overlapped.InternalHigh,
                        &handle->tcp.conn.read_buffer);
        /* Read again only if bytes == buf.len */
        if (req->u.io.overlapped.InternalHigh < handle->tcp.conn.read_buffer.len) {
          goto done;
        }
      } else {
        /* Connection closed */
        if (handle->flags & UV_HANDLE_READING) {
          handle->flags &= ~UV_HANDLE_READING;
          DECREASE_ACTIVE_COUNT(loop, handle);
        }

        /* NOTE(review): these two stores to buf appear dead — read_cb is
         * passed &handle->tcp.conn.read_buffer, not &buf. Kept as-is;
         * confirm against upstream before changing. */
        buf.base = 0;
        buf.len = 0;
        handle->read_cb((uv_stream_t*)handle, UV_EOF, &handle->tcp.conn.read_buffer);
        goto done;
      }
    }

    /* Do nonblocking reads until the buffer is empty */
    count = 32;
    while ((handle->flags & UV_HANDLE_READING) && (count-- > 0)) {
      buf = uv_buf_init(NULL, 0);
      handle->alloc_cb((uv_handle_t*) handle, 65536, &buf);
      if (buf.base == NULL || buf.len == 0) {
        handle->read_cb((uv_stream_t*) handle, UV_ENOBUFS, &buf);
        break;
      }
      assert(buf.base != NULL);

      flags = 0;
      if (WSARecv(handle->socket,
                  (WSABUF*)&buf,
                  1,
                  &bytes,
                  &flags,
                  NULL,
                  NULL) != SOCKET_ERROR) {
        if (bytes > 0) {
          /* Successful read */
          handle->read_cb((uv_stream_t*)handle, bytes, &buf);
          /* Read again only if bytes == buf.len */
          if (bytes < buf.len) {
            break;
          }
        } else {
          /* Connection closed */
          handle->flags &= ~UV_HANDLE_READING;
          DECREASE_ACTIVE_COUNT(loop, handle);

          handle->read_cb((uv_stream_t*)handle, UV_EOF, &buf);
          break;
        }
      } else {
        err = WSAGetLastError();
        if (err == WSAEWOULDBLOCK) {
          /* Read buffer was completely empty, report a 0-byte read. */
          handle->read_cb((uv_stream_t*)handle, 0, &buf);
        } else {
          /* Ouch! serious error. */
          handle->flags &= ~UV_HANDLE_READING;
          DECREASE_ACTIVE_COUNT(loop, handle);

          if (err == WSAECONNABORTED) {
            /* Turn WSAECONNABORTED into UV_ECONNRESET to be consistent with
             * Unix. */
            err = WSAECONNRESET;
          }
          handle->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);

          handle->read_cb((uv_stream_t*)handle,
                          uv_translate_sys_error(err),
                          &buf);
        }
        break;
      }
    }

done:
    /* Post another read if still reading and not closing. */
    if ((handle->flags & UV_HANDLE_READING) &&
        !(handle->flags & UV_HANDLE_READ_PENDING)) {
      uv__tcp_queue_read(loop, handle);
    }
  }

  DECREASE_PENDING_REQ_COUNT(handle);
}
1109
1110
/* Completion handler for a write request: update queue accounting, release
 * IOCP-emulation handles, report the result to the callback, and when the
 * last write drains either finish closing the socket or run a pending
 * shutdown request. */
void uv__process_tcp_write_req(uv_loop_t* loop, uv_tcp_t* handle,
    uv_write_t* req) {
  int err;

  assert(handle->type == UV_TCP);

  assert(handle->write_queue_size >= req->u.io.queued_bytes);
  handle->write_queue_size -= req->u.io.queued_bytes;

  UNREGISTER_HANDLE_REQ(loop, handle, req);

  if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
    if (req->wait_handle != INVALID_HANDLE_VALUE) {
      UnregisterWait(req->wait_handle);
      req->wait_handle = INVALID_HANDLE_VALUE;
    }
    if (req->event_handle != NULL) {
      CloseHandle(req->event_handle);
      req->event_handle = NULL;
    }
  }

  if (req->cb) {
    err = uv_translate_sys_error(GET_REQ_SOCK_ERROR(req));
    if (err == UV_ECONNABORTED) {
      /* use UV_ECANCELED for consistency with Unix */
      err = UV_ECANCELED;
    }
    req->cb(req, err);
  }

  handle->stream.conn.write_reqs_pending--;
  if (handle->stream.conn.write_reqs_pending == 0) {
    /* The socket close is deferred until all writes drain; do it now. */
    if (handle->flags & UV_HANDLE_CLOSING) {
      closesocket(handle->socket);
      handle->socket = INVALID_SOCKET;
    }
    if (uv__is_stream_shutting(handle))
      uv__process_tcp_shutdown_req(loop,
                                   handle,
                                   handle->stream.conn.shutdown_req);
  }

  DECREASE_PENDING_REQ_COUNT(handle);
}
1156
1157
/* Completion handler for an AcceptEx request. On success the accepted
 * socket is linked into the server's pending-accepts list and the
 * connection callback is invoked; on failure the accepted socket is
 * discarded and, if still listening, the accept is re-queued. */
void uv__process_tcp_accept_req(uv_loop_t* loop, uv_tcp_t* handle,
    uv_req_t* raw_req) {
  uv_tcp_accept_t* req = (uv_tcp_accept_t*) raw_req;
  int err;

  assert(handle->type == UV_TCP);

  /* If handle->accepted_socket is not a valid socket, then uv_queue_accept
   * must have failed. This is a serious error. We stop accepting connections
   * and report this error to the connection callback. */
  if (req->accept_socket == INVALID_SOCKET) {
    if (handle->flags & UV_HANDLE_LISTENING) {
      handle->flags &= ~UV_HANDLE_LISTENING;
      DECREASE_ACTIVE_COUNT(loop, handle);
      if (handle->stream.serv.connection_cb) {
        err = GET_REQ_SOCK_ERROR(req);
        handle->stream.serv.connection_cb((uv_stream_t*)handle,
                                          uv_translate_sys_error(err));
      }
    }
  } else if (REQ_SUCCESS(req) &&
      /* SO_UPDATE_ACCEPT_CONTEXT makes the accepted socket usable with
       * getsockname/getpeername etc. by tying it to the listen socket. */
      setsockopt(req->accept_socket,
                 SOL_SOCKET,
                 SO_UPDATE_ACCEPT_CONTEXT,
                 (char*)&handle->socket,
                 sizeof(handle->socket)) == 0) {
    /* Push onto the pending-accepts stack; uv_accept pops from here. */
    req->next_pending = handle->tcp.serv.pending_accepts;
    handle->tcp.serv.pending_accepts = req;

    /* Accept and SO_UPDATE_ACCEPT_CONTEXT were successful. */
    if (handle->stream.serv.connection_cb) {
      handle->stream.serv.connection_cb((uv_stream_t*)handle, 0);
    }
  } else {
    /* Error related to accepted socket is ignored because the server socket
     * may still be healthy. If the server socket is broken uv_queue_accept
     * will detect it. */
    closesocket(req->accept_socket);
    req->accept_socket = INVALID_SOCKET;
    if (handle->flags & UV_HANDLE_LISTENING) {
      uv__tcp_queue_accept(handle, req);
    }
  }

  DECREASE_PENDING_REQ_COUNT(handle);
}
1204
1205
/* Completion handler for a ConnectEx request. Resolves the final status
 * (delayed bind error, cancellation, setsockopt failure, or the request's
 * own socket error) and invokes the user's connect callback exactly once. */
void uv__process_tcp_connect_req(uv_loop_t* loop, uv_tcp_t* handle,
    uv_connect_t* req) {
  int err;

  assert(handle->type == UV_TCP);

  UNREGISTER_HANDLE_REQ(loop, handle, req);

  err = 0;
  if (handle->delayed_error) {
    /* To smooth over the differences between unixes errors that
     * were reported synchronously on the first connect can be delayed
     * until the next tick--which is now.
     */
    err = handle->delayed_error;
    handle->delayed_error = 0;
  } else if (REQ_SUCCESS(req)) {
    if (handle->flags & UV_HANDLE_CLOSING) {
      /* use UV_ECANCELED for consistency with Unix */
      err = ERROR_OPERATION_ABORTED;
    } else if (setsockopt(handle->socket,
                          SOL_SOCKET,
                          /* Required after ConnectEx so shutdown/getpeername
                           * work on the now-connected socket. */
                          SO_UPDATE_CONNECT_CONTEXT,
                          NULL,
                          0) == 0) {
      uv__connection_init((uv_stream_t*)handle);
      handle->flags |= UV_HANDLE_READABLE | UV_HANDLE_WRITABLE;
    } else {
      err = WSAGetLastError();
    }
  } else {
    err = GET_REQ_SOCK_ERROR(req);
  }
  req->cb(req, uv_translate_sys_error(err));

  DECREASE_PENDING_REQ_COUNT(handle);
}
1243
1244
/* Prepares this TCP handle's socket for transfer to another process over
 * IPC. Fills in xfer_type/xfer_info with duplicated socket information
 * for target_pid. Returns 0 on success or a Winsock/system error code. */
int uv__tcp_xfer_export(uv_tcp_t* handle,
    int target_pid,
    uv__ipc_socket_xfer_type_t* xfer_type,
    uv__ipc_socket_xfer_info_t* xfer_info) {
  if (handle->flags & UV_HANDLE_CONNECTION) {
    *xfer_type = UV__IPC_SOCKET_XFER_TCP_CONNECTION;
  } else {
    *xfer_type = UV__IPC_SOCKET_XFER_TCP_SERVER;
    /* We're about to share the socket with another process. Because this is a
     * listening socket, we assume that the other process will be accepting
     * connections on it. Thus, before sharing the socket with another process,
     * we call listen here in the parent process. */
    if (!(handle->flags & UV_HANDLE_LISTENING)) {
      if (!(handle->flags & UV_HANDLE_BOUND)) {
        return ERROR_NOT_SUPPORTED;
      }
      /* A listen failure is not reported here; it is stashed in
       * delayed_error and surfaced on the next operation, mirroring how
       * delayed bind errors are handled elsewhere. */
      if (handle->delayed_error == 0 &&
          listen(handle->socket, SOMAXCONN) == SOCKET_ERROR) {
        handle->delayed_error = WSAGetLastError();
      }
    }
  }

  if (WSADuplicateSocketW(handle->socket, target_pid, &xfer_info->socket_info))
    return WSAGetLastError();
  xfer_info->delayed_error = handle->delayed_error;

  /* Mark the local copy of the handle as 'shared' so we behave in a way that's
   * friendly to the process(es) that we share the socket with. */
  handle->flags |= UV_HANDLE_SHARED_TCP_SOCKET;

  return 0;
}
1278
1279
1280int uv__tcp_xfer_import(uv_tcp_t* tcp,
1281uv__ipc_socket_xfer_type_t xfer_type,
1282uv__ipc_socket_xfer_info_t* xfer_info) {
1283int err;
1284SOCKET socket;
1285
1286assert(xfer_type == UV__IPC_SOCKET_XFER_TCP_SERVER ||
1287xfer_type == UV__IPC_SOCKET_XFER_TCP_CONNECTION);
1288
1289socket = WSASocketW(FROM_PROTOCOL_INFO,
1290FROM_PROTOCOL_INFO,
1291FROM_PROTOCOL_INFO,
1292&xfer_info->socket_info,
12930,
1294WSA_FLAG_OVERLAPPED);
1295
1296if (socket == INVALID_SOCKET) {
1297return WSAGetLastError();
1298}
1299
1300err = uv__tcp_set_socket(
1301tcp->loop, tcp, socket, xfer_info->socket_info.iAddressFamily, 1);
1302if (err) {
1303closesocket(socket);
1304return err;
1305}
1306
1307tcp->delayed_error = xfer_info->delayed_error;
1308tcp->flags |= UV_HANDLE_BOUND | UV_HANDLE_SHARED_TCP_SOCKET;
1309
1310if (xfer_type == UV__IPC_SOCKET_XFER_TCP_CONNECTION) {
1311uv__connection_init((uv_stream_t*)tcp);
1312tcp->flags |= UV_HANDLE_READABLE | UV_HANDLE_WRITABLE;
1313}
1314
1315return 0;
1316}
1317
1318
1319int uv_tcp_nodelay(uv_tcp_t* handle, int enable) {
1320int err;
1321
1322if (handle->socket != INVALID_SOCKET) {
1323err = uv__tcp_nodelay(handle, handle->socket, enable);
1324if (err)
1325return uv_translate_sys_error(err);
1326}
1327
1328if (enable) {
1329handle->flags |= UV_HANDLE_TCP_NODELAY;
1330} else {
1331handle->flags &= ~UV_HANDLE_TCP_NODELAY;
1332}
1333
1334return 0;
1335}
1336
1337
1338int uv_tcp_keepalive(uv_tcp_t* handle, int enable, unsigned int delay) {
1339int err;
1340
1341if (handle->socket != INVALID_SOCKET) {
1342err = uv__tcp_keepalive(handle, handle->socket, enable, delay);
1343if (err)
1344return uv_translate_sys_error(err);
1345}
1346
1347if (enable) {
1348handle->flags |= UV_HANDLE_TCP_KEEPALIVE;
1349} else {
1350handle->flags &= ~UV_HANDLE_TCP_KEEPALIVE;
1351}
1352
1353/* TODO: Store delay if handle->socket isn't created yet. */
1354
1355return 0;
1356}
1357
1358
1359int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable) {
1360if (handle->flags & UV_HANDLE_CONNECTION) {
1361return UV_EINVAL;
1362}
1363
1364/* Check if we're already in the desired mode. */
1365if ((enable && !(handle->flags & UV_HANDLE_TCP_SINGLE_ACCEPT)) ||
1366(!enable && handle->flags & UV_HANDLE_TCP_SINGLE_ACCEPT)) {
1367return 0;
1368}
1369
1370/* Don't allow switching from single pending accept to many. */
1371if (enable) {
1372return UV_ENOTSUP;
1373}
1374
1375/* Check if we're in a middle of changing the number of pending accepts. */
1376if (handle->flags & UV_HANDLE_TCP_ACCEPT_STATE_CHANGING) {
1377return 0;
1378}
1379
1380handle->flags |= UV_HANDLE_TCP_SINGLE_ACCEPT;
1381
1382/* Flip the changing flag if we have already queued multiple accepts. */
1383if (handle->flags & UV_HANDLE_LISTENING) {
1384handle->flags |= UV_HANDLE_TCP_ACCEPT_STATE_CHANGING;
1385}
1386
1387return 0;
1388}
1389
1390
/* Best-effort cancellation of the handle's outstanding read and write
 * requests, issued during close. When non-IFS LSPs are installed the
 * cancel must also be issued against the base (kernel-level) socket
 * handle, which is looked up with SIO_BASE_HANDLE. */
static void uv__tcp_try_cancel_reqs(uv_tcp_t* tcp) {
  SOCKET socket;
  int non_ifs_lsp;
  int reading;
  int writing;

  socket = tcp->socket;
  reading = tcp->flags & UV_HANDLE_READ_PENDING;
  writing = tcp->stream.conn.write_reqs_pending > 0;
  if (!reading && !writing)
    return;

  /* TODO: in libuv v2, keep explicit track of write_reqs, so we can cancel
   * them each explicitly with CancelIoEx (like unix). */
  if (reading)
    CancelIoEx((HANDLE) socket, &tcp->read_req.u.io.overlapped);
  if (writing)
    /* Without per-request OVERLAPPEDs for writes, cancel everything this
     * thread has pending on the socket. */
    CancelIo((HANDLE) socket);

  /* Check if we have any non-IFS LSPs stacked on top of TCP */
  non_ifs_lsp = (tcp->flags & UV_HANDLE_IPV6) ? uv_tcp_non_ifs_lsp_ipv6 :
                                                uv_tcp_non_ifs_lsp_ipv4;

  /* If there are non-ifs LSPs then try to obtain a base handle for the socket.
   */
  if (non_ifs_lsp) {
    DWORD bytes;
    if (WSAIoctl(socket,
                 SIO_BASE_HANDLE,
                 NULL,
                 0,
                 &socket,
                 sizeof socket,
                 &bytes,
                 NULL,
                 NULL) != 0) {
      /* Failed. We can't do CancelIo. */
      return;
    }
  }

  assert(socket != 0 && socket != INVALID_SOCKET);

  /* Repeat the cancellation on the base handle if it differs from the
   * LSP-level socket we already canceled on. */
  if (socket != tcp->socket) {
    if (reading)
      CancelIoEx((HANDLE) socket, &tcp->read_req.u.io.overlapped);
    if (writing)
      CancelIo((HANDLE) socket);
  }
}
1441
1442
/* Begins closing a TCP handle: stops reading, cancels in-flight requests
 * (connection handles) or tears down queued accepts (server handles),
 * clears state flags, and closes the socket as soon as it is safe to. */
void uv__tcp_close(uv_loop_t* loop, uv_tcp_t* tcp) {
  if (tcp->flags & UV_HANDLE_CONNECTION) {
    if (tcp->flags & UV_HANDLE_READING) {
      uv_read_stop((uv_stream_t*) tcp);
    }
    uv__tcp_try_cancel_reqs(tcp);
  } else {
    if (tcp->tcp.serv.accept_reqs != NULL) {
      /* First close the incoming sockets to cancel the accept operations before
       * we free their resources. */
      unsigned int i;
      for (i = 0; i < uv_simultaneous_server_accepts; i++) {
        uv_tcp_accept_t* req = &tcp->tcp.serv.accept_reqs[i];
        if (req->accept_socket != INVALID_SOCKET) {
          closesocket(req->accept_socket);
          req->accept_socket = INVALID_SOCKET;
        }
      }
    }
    assert(!(tcp->flags & UV_HANDLE_READING));
  }

  if (tcp->flags & UV_HANDLE_LISTENING) {
    tcp->flags &= ~UV_HANDLE_LISTENING;
    DECREASE_ACTIVE_COUNT(loop, tcp);
  }

  tcp->flags &= ~(UV_HANDLE_READABLE | UV_HANDLE_WRITABLE);
  uv__handle_closing(tcp);

  /* If any overlapped req failed to cancel, calling `closesocket` now would
   * cause Win32 to send an RST packet. Try to avoid that for writes, if
   * possibly applicable, by waiting to process the completion notifications
   * first (which typically should be cancellations). There's not much we can
   * do about canceled reads, which also will generate an RST packet. */
  /* (When writes remain pending, uv__process_tcp_write_req closes the
   * socket after the last one completes.) */
  if (!(tcp->flags & UV_HANDLE_CONNECTION) ||
      tcp->stream.conn.write_reqs_pending == 0) {
    closesocket(tcp->socket);
    tcp->socket = INVALID_SOCKET;
  }

  if (tcp->reqs_pending == 0)
    uv__want_endgame(loop, (uv_handle_t*) tcp);
}
1487
1488
/* Wraps an existing OS socket in a uv_tcp_t. Detects the socket's address
 * family, associates it with the loop, and probes whether it is already
 * bound and/or connected so the handle's flags reflect reality.
 * Returns 0 on success or a libuv error code. */
int uv_tcp_open(uv_tcp_t* handle, uv_os_sock_t sock) {
  WSAPROTOCOL_INFOW protocol_info;
  int opt_len;
  int err;
  struct sockaddr_storage saddr;
  int saddr_len;

  /* Detect the address family of the socket. */
  opt_len = (int) sizeof protocol_info;
  if (getsockopt(sock,
                 SOL_SOCKET,
                 SO_PROTOCOL_INFOW,
                 (char*) &protocol_info,
                 &opt_len) == SOCKET_ERROR) {
    return uv_translate_sys_error(GetLastError());
  }

  err = uv__tcp_set_socket(handle->loop,
                           handle,
                           sock,
                           protocol_info.iAddressFamily,
                           1);
  if (err) {
    return uv_translate_sys_error(err);
  }

  /* Support already active socket. */
  saddr_len = sizeof(saddr);
  if (!uv_tcp_getsockname(handle, (struct sockaddr*) &saddr, &saddr_len)) {
    /* Socket is already bound. */
    handle->flags |= UV_HANDLE_BOUND;
    saddr_len = sizeof(saddr);
    if (!uv_tcp_getpeername(handle, (struct sockaddr*) &saddr, &saddr_len)) {
      /* Socket is already connected. */
      uv__connection_init((uv_stream_t*) handle);
      handle->flags |= UV_HANDLE_READABLE | UV_HANDLE_WRITABLE;
    }
  }

  return 0;
}
1530
1531
1532/* This function is an egress point, i.e. it returns libuv errors rather than
1533* system errors.
1534*/
1535int uv__tcp_bind(uv_tcp_t* handle,
1536const struct sockaddr* addr,
1537unsigned int addrlen,
1538unsigned int flags) {
1539int err;
1540
1541err = uv__tcp_try_bind(handle, addr, addrlen, flags);
1542if (err)
1543return uv_translate_sys_error(err);
1544
1545return 0;
1546}
1547
1548
1549/* This function is an egress point, i.e. it returns libuv errors rather than
1550* system errors.
1551*/
1552int uv__tcp_connect(uv_connect_t* req,
1553uv_tcp_t* handle,
1554const struct sockaddr* addr,
1555unsigned int addrlen,
1556uv_connect_cb cb) {
1557int err;
1558
1559err = uv__tcp_try_connect(req, handle, addr, addrlen, cb);
1560if (err)
1561return uv_translate_sys_error(err);
1562
1563return 0;
1564}
1565
1566
/* Emulates Unix socketpair() on Windows: creates a loopback listener,
 * connects client0 to it, and accepts client1 via AcceptEx. On success
 * returns 0 with the connected pair in fds[0]/fds[1]; on failure returns
 * a libuv error and closes everything created so far.
 * flags0/flags1: UV_NONBLOCK_PIPE requests an overlapped (non-blocking)
 * socket for the respective end. */
int uv_socketpair(int type, int protocol, uv_os_sock_t fds[2], int flags0, int flags1) {
  SOCKET server = INVALID_SOCKET;
  SOCKET client0 = INVALID_SOCKET;
  SOCKET client1 = INVALID_SOCKET;
  SOCKADDR_IN name;
  LPFN_ACCEPTEX func_acceptex;
  WSAOVERLAPPED overlap;
  /* AcceptEx requires room for local + remote addresses plus 16 bytes each. */
  char accept_buffer[sizeof(struct sockaddr_storage) * 2 + 32];
  int namelen;
  int err;
  DWORD bytes;
  DWORD flags;
  DWORD client0_flags = WSA_FLAG_NO_HANDLE_INHERIT;
  DWORD client1_flags = WSA_FLAG_NO_HANDLE_INHERIT;

  if (flags0 & UV_NONBLOCK_PIPE)
    client0_flags |= WSA_FLAG_OVERLAPPED;
  if (flags1 & UV_NONBLOCK_PIPE)
    client1_flags |= WSA_FLAG_OVERLAPPED;

  /* Throwaway listener bound to an ephemeral loopback port. */
  server = WSASocketW(AF_INET, type, protocol, NULL, 0,
                      WSA_FLAG_OVERLAPPED | WSA_FLAG_NO_HANDLE_INHERIT);
  if (server == INVALID_SOCKET)
    goto wsaerror;
  if (!SetHandleInformation((HANDLE) server, HANDLE_FLAG_INHERIT, 0))
    goto error;
  name.sin_family = AF_INET;
  name.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
  name.sin_port = 0;
  if (bind(server, (SOCKADDR*) &name, sizeof(name)) != 0)
    goto wsaerror;
  if (listen(server, 1) != 0)
    goto wsaerror;
  /* Recover the kernel-assigned port so client0 knows where to connect. */
  namelen = sizeof(name);
  if (getsockname(server, (SOCKADDR*) &name, &namelen) != 0)
    goto wsaerror;
  client0 = WSASocketW(AF_INET, type, protocol, NULL, 0, client0_flags);
  if (client0 == INVALID_SOCKET)
    goto wsaerror;
  if (!SetHandleInformation((HANDLE) client0, HANDLE_FLAG_INHERIT, 0))
    goto error;
  if (connect(client0, (SOCKADDR*) &name, sizeof(name)) != 0)
    goto wsaerror;
  client1 = WSASocketW(AF_INET, type, protocol, NULL, 0, client1_flags);
  if (client1 == INVALID_SOCKET)
    goto wsaerror;
  if (!SetHandleInformation((HANDLE) client1, HANDLE_FLAG_INHERIT, 0))
    goto error;
  if (!uv__get_acceptex_function(server, &func_acceptex)) {
    err = WSAEAFNOSUPPORT;
    goto cleanup;
  }
  memset(&overlap, 0, sizeof(overlap));
  if (!func_acceptex(server,
                     client1,
                     accept_buffer,
                     0,
                     sizeof(struct sockaddr_storage),
                     sizeof(struct sockaddr_storage),
                     &bytes,
                     &overlap)) {
    err = WSAGetLastError();
    if (err == ERROR_IO_PENDING) {
      /* Result should complete immediately, since we already called connect,
       * but empirically, we sometimes have to poll the kernel a couple times
       * until it notices that. */
      while (!WSAGetOverlappedResult(client1, &overlap, &bytes, FALSE, &flags)) {
        err = WSAGetLastError();
        if (err != WSA_IO_INCOMPLETE)
          goto cleanup;
        SwitchToThread();
      }
    }
    else {
      goto cleanup;
    }
  }
  /* Finish the accept so client1 behaves like a normally-accepted socket. */
  if (setsockopt(client1, SOL_SOCKET, SO_UPDATE_ACCEPT_CONTEXT,
                 (char*) &server, sizeof(server)) != 0) {
    goto wsaerror;
  }

  /* The listener has served its purpose; only the pair survives. */
  closesocket(server);

  fds[0] = client0;
  fds[1] = client1;

  return 0;

 wsaerror:
  err = WSAGetLastError();
  goto cleanup;

 error:
  err = GetLastError();
  goto cleanup;

 cleanup:
  if (server != INVALID_SOCKET)
    closesocket(server);
  if (client0 != INVALID_SOCKET)
    closesocket(client0);
  if (client1 != INVALID_SOCKET)
    closesocket(client1);

  assert(err);
  return uv_translate_sys_error(err);
}
1675