1 /**
2 * @file
3 * Sockets BSD-Like API module
4 */
5
6 /*
7 * Copyright (c) 2001-2004 Swedish Institute of Computer Science.
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without modification,
11 * are permitted provided that the following conditions are met:
12 *
13 * 1. Redistributions of source code must retain the above copyright notice,
14 * this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright notice,
16 * this list of conditions and the following disclaimer in the documentation
17 * and/or other materials provided with the distribution.
18 * 3. The name of the author may not be used to endorse or promote products
19 * derived from this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
22 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
23 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
24 * SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
25 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
26 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
29 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
30 * OF SUCH DAMAGE.
31 *
32 * This file is part of the lwIP TCP/IP stack.
33 *
34 * Author: Adam Dunkels <[email protected]>
35 *
36 * Improved by Marc Boucher <[email protected]> and David Haas <[email protected]>
37 *
38 */
39
40 #include "lwip/opt.h"
41
42 #if LWIP_SOCKET /* don't build if not configured for use in lwipopts.h */
43
44 #include "lwip/sockets.h"
45 #include "lwip/priv/sockets_priv.h"
46 #include "lwip/api.h"
47 #include "lwip/igmp.h"
48 #include "lwip/inet.h"
49 #include "lwip/tcp.h"
50 #include "lwip/raw.h"
51 #include "lwip/udp.h"
52 #include "lwip/memp.h"
53 #include "lwip/pbuf.h"
54 #include "lwip/netif.h"
55 #include "lwip/priv/tcpip_priv.h"
56 #include "lwip/mld6.h"
57 #if LWIP_CHECKSUM_ON_COPY
58 #include "lwip/inet_chksum.h"
59 #endif
60
61 #if LWIP_COMPAT_SOCKETS == 2 && LWIP_POSIX_SOCKETS_IO_NAMES
62 #include <stdarg.h>
63 #endif
64
65 #include <string.h>
66
67 #ifdef LWIP_HOOK_FILENAME
68 #include LWIP_HOOK_FILENAME
69 #endif
70
71 /* If the netconn API is not required publicly, then we include the necessary
72 files here to get the implementation */
73 #if !LWIP_NETCONN
74 #undef LWIP_NETCONN
75 #define LWIP_NETCONN 1
76 #include "api_msg.c"
77 #include "api_lib.c"
78 #include "netbuf.c"
79 #undef LWIP_NETCONN
80 #define LWIP_NETCONN 0
81 #endif
82
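/* Helper macros to declare, allocate and free a struct lwip_select_cb: with
   LWIP_MPU_COMPATIBLE the callback struct comes from the MEMP_SELECT_CB pool,
   otherwise it lives on the caller's stack. */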
83 #define API_SELECT_CB_VAR_REF(name) API_VAR_REF(name)
84 #define API_SELECT_CB_VAR_DECLARE(name) API_VAR_DECLARE(struct lwip_select_cb, name)
85 #define API_SELECT_CB_VAR_ALLOC(name, retblock) API_VAR_ALLOC_EXT(struct lwip_select_cb, MEMP_SELECT_CB, name, retblock)
86 #define API_SELECT_CB_VAR_FREE(name) API_VAR_FREE(MEMP_SELECT_CB, name)
87
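/* Conversion helpers between lwIP ip_addr_t/port pairs and the BSD-style
   sockaddr_in/sockaddr_in6 structures used at the socket API boundary. */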
88 #if LWIP_IPV4
89 #define IP4ADDR_PORT_TO_SOCKADDR(sin, ipaddr, port) do { \
90 (sin)->sin_len = sizeof(struct sockaddr_in); \
91 (sin)->sin_family = AF_INET; \
92 (sin)->sin_port = lwip_htons((port)); \
93 inet_addr_from_ip4addr(&(sin)->sin_addr, ipaddr); \
94 memset((sin)->sin_zero, 0, SIN_ZERO_LEN); }while(0)
95 #define SOCKADDR4_TO_IP4ADDR_PORT(sin, ipaddr, port) do { \
96 inet_addr_to_ip4addr(ip_2_ip4(ipaddr), &((sin)->sin_addr)); \
97 (port) = lwip_ntohs((sin)->sin_port); }while(0)
98 #endif /* LWIP_IPV4 */
99
100 #if LWIP_IPV6
101 #define IP6ADDR_PORT_TO_SOCKADDR(sin6, ipaddr, port) do { \
102 (sin6)->sin6_len = sizeof(struct sockaddr_in6); \
103 (sin6)->sin6_family = AF_INET6; \
104 (sin6)->sin6_port = lwip_htons((port)); \
105 (sin6)->sin6_flowinfo = 0; \
106 inet6_addr_from_ip6addr(&(sin6)->sin6_addr, ipaddr); \
107 (sin6)->sin6_scope_id = ip6_addr_zone(ipaddr); }while(0)
108 #define SOCKADDR6_TO_IP6ADDR_PORT(sin6, ipaddr, port) do { \
109 inet6_addr_to_ip6addr(ip_2_ip6(ipaddr), &((sin6)->sin6_addr)); \
110 if (ip6_addr_has_scope(ip_2_ip6(ipaddr), IP6_UNKNOWN)) { \
111 ip6_addr_set_zone(ip_2_ip6(ipaddr), (u8_t)((sin6)->sin6_scope_id)); \
112 } \
113 (port) = lwip_ntohs((sin6)->sin6_port); }while(0)
114 #endif /* LWIP_IPV6 */
115
116 #if LWIP_IPV4 && LWIP_IPV6
117 static void sockaddr_to_ipaddr_port(const struct sockaddr *sockaddr, ip_addr_t *ipaddr, u16_t *port);
118
119 #define IS_SOCK_ADDR_LEN_VALID(namelen) (((namelen) == sizeof(struct sockaddr_in)) || \
120 ((namelen) == sizeof(struct sockaddr_in6)))
121 #define IS_SOCK_ADDR_TYPE_VALID(name) (((name)->sa_family == AF_INET) || \
122 ((name)->sa_family == AF_INET6))
123 #define SOCK_ADDR_TYPE_MATCH(name, sock) \
124 ((((name)->sa_family == AF_INET) && !(NETCONNTYPE_ISIPV6((sock)->conn->type))) || \
125 (((name)->sa_family == AF_INET6) && (NETCONNTYPE_ISIPV6((sock)->conn->type))))
126 #define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) do { \
127 if (IP_IS_ANY_TYPE_VAL(*ipaddr) || IP_IS_V6_VAL(*ipaddr)) { \
128 IP6ADDR_PORT_TO_SOCKADDR((struct sockaddr_in6*)(void*)(sockaddr), ip_2_ip6(ipaddr), port); \
129 } else { \
130 IP4ADDR_PORT_TO_SOCKADDR((struct sockaddr_in*)(void*)(sockaddr), ip_2_ip4(ipaddr), port); \
131 } } while(0)
132 #define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) sockaddr_to_ipaddr_port(sockaddr, ipaddr, &(port))
133 #define DOMAIN_TO_NETCONN_TYPE(domain, type) (((domain) == AF_INET) ? \
134 (type) : (enum netconn_type)((type) | NETCONN_TYPE_IPV6))
135 #elif LWIP_IPV6 /* LWIP_IPV4 && LWIP_IPV6 */
136 #define IS_SOCK_ADDR_LEN_VALID(namelen) ((namelen) == sizeof(struct sockaddr_in6))
137 #define IS_SOCK_ADDR_TYPE_VALID(name) ((name)->sa_family == AF_INET6)
138 #define SOCK_ADDR_TYPE_MATCH(name, sock) 1
139 #define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) \
140 IP6ADDR_PORT_TO_SOCKADDR((struct sockaddr_in6*)(void*)(sockaddr), ip_2_ip6(ipaddr), port)
141 #define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) \
142 SOCKADDR6_TO_IP6ADDR_PORT((const struct sockaddr_in6*)(const void*)(sockaddr), ipaddr, port)
143 #define DOMAIN_TO_NETCONN_TYPE(domain, netconn_type) (netconn_type)
144 #else /*-> LWIP_IPV4: LWIP_IPV4 && LWIP_IPV6 */
145 #define IS_SOCK_ADDR_LEN_VALID(namelen) ((namelen) == sizeof(struct sockaddr_in))
146 #define IS_SOCK_ADDR_TYPE_VALID(name) ((name)->sa_family == AF_INET)
147 #define SOCK_ADDR_TYPE_MATCH(name, sock) 1
148 #define IPADDR_PORT_TO_SOCKADDR(sockaddr, ipaddr, port) \
149 IP4ADDR_PORT_TO_SOCKADDR((struct sockaddr_in*)(void*)(sockaddr), ip_2_ip4(ipaddr), port)
150 #define SOCKADDR_TO_IPADDR_PORT(sockaddr, ipaddr, port) \
151 SOCKADDR4_TO_IP4ADDR_PORT((const struct sockaddr_in*)(const void*)(sockaddr), ipaddr, port)
152 #define DOMAIN_TO_NETCONN_TYPE(domain, netconn_type) (netconn_type)
153 #endif /* LWIP_IPV6 */
154
155 #define IS_SOCK_ADDR_TYPE_VALID_OR_UNSPEC(name) (((name)->sa_family == AF_UNSPEC) || \
156 IS_SOCK_ADDR_TYPE_VALID(name))
157 #define SOCK_ADDR_TYPE_MATCH_OR_UNSPEC(name, sock) (((name)->sa_family == AF_UNSPEC) || \
158 SOCK_ADDR_TYPE_MATCH(name, sock))
159 #define IS_SOCK_ADDR_ALIGNED(name) ((((mem_ptr_t)(name)) % 4) == 0)
160
161
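/* Argument-checking helpers for getsockopt/setsockopt: they verify the option
   length and, where required, that the netconn/pcb exists and matches the expected
   netconn type; on failure they release the socket and return an errno value. */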
162 #define LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype) do { if ((optlen) < sizeof(opttype)) { done_socket(sock); return EINVAL; }}while(0)
163 #define LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, opttype) do { \
164 LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype); \
165 if ((sock)->conn == NULL) { done_socket(sock); return EINVAL; } }while(0)
166 #define LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, opttype) do { \
167 LWIP_SOCKOPT_CHECK_OPTLEN(sock, optlen, opttype); \
168 if (((sock)->conn == NULL) || ((sock)->conn->pcb.tcp == NULL)) { done_socket(sock); return EINVAL; } }while(0)
169 #define LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, opttype, netconntype) do { \
170 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, opttype); \
171 if (NETCONNTYPE_GROUP(netconn_type((sock)->conn)) != netconntype) { done_socket(sock); return ENOPROTOOPT; } }while(0)
172
173
174 #define LWIP_SETGETSOCKOPT_DATA_VAR_REF(name) API_VAR_REF(name)
175 #define LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(name) API_VAR_DECLARE(struct lwip_setgetsockopt_data, name)
176 #define LWIP_SETGETSOCKOPT_DATA_VAR_FREE(name) API_VAR_FREE(MEMP_SOCKET_SETGETSOCKOPT_DATA, name)
177 #if LWIP_MPU_COMPATIBLE
178 #define LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(name, sock) do { \
179 name = (struct lwip_setgetsockopt_data *)memp_malloc(MEMP_SOCKET_SETGETSOCKOPT_DATA); \
180 if (name == NULL) { \
181 set_errno(ENOMEM); \
182 done_socket(sock); \
183 return -1; \
184 } }while(0)
185 #else /* LWIP_MPU_COMPATIBLE */
186 #define LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(name, sock)
187 #endif /* LWIP_MPU_COMPATIBLE */
188
189 #if LWIP_SO_SNDRCVTIMEO_NONSTANDARD
190 #define LWIP_SO_SNDRCVTIMEO_OPTTYPE int
191 #define LWIP_SO_SNDRCVTIMEO_SET(optval, val) (*(int *)(optval) = (val))
192 #define LWIP_SO_SNDRCVTIMEO_GET_MS(optval) ((long)*(const int*)(optval))
193 #else
194 #define LWIP_SO_SNDRCVTIMEO_OPTTYPE struct timeval
195 #define LWIP_SO_SNDRCVTIMEO_SET(optval, val) do { \
196 u32_t loc = (val); \
197 ((struct timeval *)(optval))->tv_sec = (long)((loc) / 1000U); \
198 ((struct timeval *)(optval))->tv_usec = (long)(((loc) % 1000U) * 1000U); }while(0)
199 #define LWIP_SO_SNDRCVTIMEO_GET_MS(optval) ((((const struct timeval *)(optval))->tv_sec * 1000) + (((const struct timeval *)(optval))->tv_usec / 1000))
200 #endif
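/* Illustrative sketch (not part of this file): with the standard option type above,
   an application would set a 1500 ms receive timeout roughly like this (assuming
   LWIP_SO_RCVTIMEO is enabled):

     struct timeval tv;
     tv.tv_sec = 1;
     tv.tv_usec = 500000;
     lwip_setsockopt(s, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));

   With LWIP_SO_SNDRCVTIMEO_NONSTANDARD==1 the option value is a plain int holding
   the timeout in milliseconds instead. */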
201
202
203 /** A struct sockaddr replacement that has the same alignment as sockaddr_in/
204 * sockaddr_in6 if instantiated.
205 */
206 union sockaddr_aligned {
207 struct sockaddr sa;
208 #if LWIP_IPV6
209 struct sockaddr_in6 sin6;
210 #endif /* LWIP_IPV6 */
211 #if LWIP_IPV4
212 struct sockaddr_in sin;
213 #endif /* LWIP_IPV4 */
214 };
215
216 /* Define the number of IPv4 multicast memberships, default is one per socket */
217 #ifndef LWIP_SOCKET_MAX_MEMBERSHIPS
218 #define LWIP_SOCKET_MAX_MEMBERSHIPS NUM_SOCKETS
219 #endif
220
221 #if LWIP_IGMP
222 /* This is to keep track of IP_ADD_MEMBERSHIP calls to drop the membership when
223 a socket is closed */
224 struct lwip_socket_multicast_pair {
225 /** the socket */
226 struct lwip_sock *sock;
227 /** the interface address */
228 ip4_addr_t if_addr;
229 /** the group address */
230 ip4_addr_t multi_addr;
231 };
232
233 static struct lwip_socket_multicast_pair socket_ipv4_multicast_memberships[LWIP_SOCKET_MAX_MEMBERSHIPS];
234
235 static int lwip_socket_register_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr);
236 static void lwip_socket_unregister_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr);
237 static void lwip_socket_drop_registered_memberships(int s);
238 #endif /* LWIP_IGMP */
239
240 #if LWIP_IPV6_MLD
241 /* This is to keep track of IPV6_JOIN_GROUP calls to drop the membership when
242 a socket is closed */
243 struct lwip_socket_multicast_mld6_pair {
244 /** the socket */
245 struct lwip_sock *sock;
246 /** the interface index */
247 u8_t if_idx;
248 /** the group address */
249 ip6_addr_t multi_addr;
250 };
251
252 static struct lwip_socket_multicast_mld6_pair socket_ipv6_multicast_memberships[LWIP_SOCKET_MAX_MEMBERSHIPS];
253
254 static int lwip_socket_register_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr);
255 static void lwip_socket_unregister_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr);
256 static void lwip_socket_drop_registered_mld6_memberships(int s);
257 #endif /* LWIP_IPV6_MLD */
258
259 /** The global array of available sockets */
260 static struct lwip_sock sockets[NUM_SOCKETS];
261
262 #if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
263 #if LWIP_TCPIP_CORE_LOCKING
264 /* protect the select_cb_list using core lock */
265 #define LWIP_SOCKET_SELECT_DECL_PROTECT(lev)
266 #define LWIP_SOCKET_SELECT_PROTECT(lev) LOCK_TCPIP_CORE()
267 #define LWIP_SOCKET_SELECT_UNPROTECT(lev) UNLOCK_TCPIP_CORE()
268 #else /* LWIP_TCPIP_CORE_LOCKING */
269 /* protect the select_cb_list using SYS_LIGHTWEIGHT_PROT */
270 #define LWIP_SOCKET_SELECT_DECL_PROTECT(lev) SYS_ARCH_DECL_PROTECT(lev)
271 #define LWIP_SOCKET_SELECT_PROTECT(lev) SYS_ARCH_PROTECT(lev)
272 #define LWIP_SOCKET_SELECT_UNPROTECT(lev) SYS_ARCH_UNPROTECT(lev)
273 /** This counter is increased from lwip_select when the list is changed
274 and checked in select_check_waiters to see if it has changed. */
275 static volatile int select_cb_ctr;
276 #endif /* LWIP_TCPIP_CORE_LOCKING */
277 /** The global list of tasks waiting for select */
278 static struct lwip_select_cb *select_cb_list;
279 #endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */
280
281 /* Forward declaration of some functions */
282 #if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
283 static void event_callback(struct netconn *conn, enum netconn_evt evt, u16_t len);
284 #define DEFAULT_SOCKET_EVENTCB event_callback
285 static void select_check_waiters(int s, int has_recvevent, int has_sendevent, int has_errevent);
286 #else
287 #define DEFAULT_SOCKET_EVENTCB NULL
288 #endif
289 #if !LWIP_TCPIP_CORE_LOCKING
290 static void lwip_getsockopt_callback(void *arg);
291 static void lwip_setsockopt_callback(void *arg);
292 #endif
293 static int lwip_getsockopt_impl(int s, int level, int optname, void *optval, socklen_t *optlen);
294 static int lwip_setsockopt_impl(int s, int level, int optname, const void *optval, socklen_t optlen);
295 static int free_socket_locked(struct lwip_sock *sock, int is_tcp, struct netconn **conn,
296 union lwip_sock_lastdata *lastdata);
297 static void free_socket_free_elements(int is_tcp, struct netconn *conn, union lwip_sock_lastdata *lastdata);
298
299 #if LWIP_IPV4 && LWIP_IPV6
300 static void
301 sockaddr_to_ipaddr_port(const struct sockaddr *sockaddr, ip_addr_t *ipaddr, u16_t *port)
302 {
303 if ((sockaddr->sa_family) == AF_INET6) {
304 SOCKADDR6_TO_IP6ADDR_PORT((const struct sockaddr_in6 *)(const void *)(sockaddr), ipaddr, *port);
305 ipaddr->type = IPADDR_TYPE_V6;
306 } else {
307 SOCKADDR4_TO_IP4ADDR_PORT((const struct sockaddr_in *)(const void *)(sockaddr), ipaddr, *port);
308 ipaddr->type = IPADDR_TYPE_V4;
309 }
310 }
311 #endif /* LWIP_IPV4 && LWIP_IPV6 */
312
313 /** LWIP_NETCONN_SEM_PER_THREAD==1: initialize thread-local semaphore */
314 void
315 lwip_socket_thread_init(void)
316 {
317 netconn_thread_init();
318 }
319
320 /** LWIP_NETCONN_SEM_PER_THREAD==1: destroy thread-local semaphore */
321 void
322 lwip_socket_thread_cleanup(void)
323 {
324 netconn_thread_cleanup();
325 }
326
327 #if LWIP_NETCONN_FULLDUPLEX
328 /* Thread-safe increment of sock->fd_used, with overflow check */
329 static int
330 sock_inc_used(struct lwip_sock *sock)
331 {
332 int ret;
333 SYS_ARCH_DECL_PROTECT(lev);
334
335 LWIP_ASSERT("sock != NULL", sock != NULL);
336
337 SYS_ARCH_PROTECT(lev);
338 if (sock->fd_free_pending) {
339 /* prevent new usage of this socket if free is pending */
340 ret = 0;
341 } else {
342 ++sock->fd_used;
343 ret = 1;
344 LWIP_ASSERT("sock->fd_used != 0", sock->fd_used != 0);
345 }
346 SYS_ARCH_UNPROTECT(lev);
347 return ret;
348 }
349
350 /* Like sock_inc_used(), but called under SYS_ARCH_PROTECT lock. */
351 static int
352 sock_inc_used_locked(struct lwip_sock *sock)
353 {
354 LWIP_ASSERT("sock != NULL", sock != NULL);
355
356 if (sock->fd_free_pending) {
357 LWIP_ASSERT("sock->fd_used != 0", sock->fd_used != 0);
358 return 0;
359 }
360
361 ++sock->fd_used;
362 LWIP_ASSERT("sock->fd_used != 0", sock->fd_used != 0);
363 return 1;
364 }
365
366 /* In full-duplex mode, sock->fd_used != 0 prevents a socket descriptor from being
367 * released (and possibly reused) when used from more than one thread
368 * (e.g. read-while-write or close-while-write, etc)
369 * This function is called at the end of functions using (try)get_socket*().
370 */
371 static void
372 done_socket(struct lwip_sock *sock)
373 {
374 int freed = 0;
375 int is_tcp = 0;
376 struct netconn *conn = NULL;
377 union lwip_sock_lastdata lastdata;
378 SYS_ARCH_DECL_PROTECT(lev);
379 LWIP_ASSERT("sock != NULL", sock != NULL);
380
381 SYS_ARCH_PROTECT(lev);
382 LWIP_ASSERT("sock->fd_used > 0", sock->fd_used > 0);
383 if (--sock->fd_used == 0) {
384 if (sock->fd_free_pending) {
385 /* free the socket */
386 sock->fd_used = 1;
387 is_tcp = sock->fd_free_pending & LWIP_SOCK_FD_FREE_TCP;
388 freed = free_socket_locked(sock, is_tcp, &conn, &lastdata);
389 }
390 }
391 SYS_ARCH_UNPROTECT(lev);
392
393 if (freed) {
394 free_socket_free_elements(is_tcp, conn, &lastdata);
395 }
396 }
397
398 #else /* LWIP_NETCONN_FULLDUPLEX */
399 #define sock_inc_used(sock) 1
400 #define sock_inc_used_locked(sock) 1
401 #define done_socket(sock)
402 #endif /* LWIP_NETCONN_FULLDUPLEX */
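/* Usage sketch (illustrative only) of the reference counting above: every public
   socket function follows the pattern

     struct lwip_sock *sock = get_socket(s);   // increments fd_used when full-duplex is enabled
     if (!sock) {
       return -1;
     }
     // ... use sock->conn ...
     done_socket(sock);                        // decrements fd_used and may free the socket

   so a concurrent lwip_close() only releases the descriptor after the last user
   has called done_socket(). */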
403
404 /* Translate a socket 'int' into a pointer (only fails if the index is invalid) */
405 static struct lwip_sock *
406 tryget_socket_unconn_nouse(int fd)
407 {
408 int s = fd - LWIP_SOCKET_OFFSET;
409 if ((s < 0) || (s >= NUM_SOCKETS)) {
410 LWIP_DEBUGF(SOCKETS_DEBUG, ("tryget_socket_unconn(%d): invalid\n", fd));
411 return NULL;
412 }
413 return &sockets[s];
414 }
415
416 struct lwip_sock *
417 lwip_socket_dbg_get_socket(int fd)
418 {
419 return tryget_socket_unconn_nouse(fd);
420 }
421
422 /* Translate a socket 'int' into a pointer (only fails if the index is invalid) */
423 static struct lwip_sock *
424 tryget_socket_unconn(int fd)
425 {
426 struct lwip_sock *ret = tryget_socket_unconn_nouse(fd);
427 if (ret != NULL) {
428 if (!sock_inc_used(ret)) {
429 return NULL;
430 }
431 }
432 return ret;
433 }
434
435 /* Like tryget_socket_unconn(), but called under SYS_ARCH_PROTECT lock. */
436 static struct lwip_sock *
437 tryget_socket_unconn_locked(int fd)
438 {
439 struct lwip_sock *ret = tryget_socket_unconn_nouse(fd);
440 if (ret != NULL) {
441 if (!sock_inc_used_locked(ret)) {
442 return NULL;
443 }
444 }
445 return ret;
446 }
447
448 /**
449 * Same as get_socket but doesn't set errno
450 *
451 * @param fd externally used socket index
452 * @return struct lwip_sock for the socket or NULL if not found
453 */
454 static struct lwip_sock *
455 tryget_socket(int fd)
456 {
457 struct lwip_sock *sock = tryget_socket_unconn(fd);
458 if (sock != NULL) {
459 if (sock->conn) {
460 return sock;
461 }
462 done_socket(sock);
463 }
464 return NULL;
465 }
466
467 /**
468 * Map an externally used socket index to the internal socket representation.
469 *
470 * @param fd externally used socket index
471 * @return struct lwip_sock for the socket or NULL if not found
472 */
473 static struct lwip_sock *
474 get_socket(int fd)
475 {
476 struct lwip_sock *sock = tryget_socket(fd);
477 if (!sock) {
478 if ((fd < LWIP_SOCKET_OFFSET) || (fd >= (LWIP_SOCKET_OFFSET + NUM_SOCKETS))) {
479 LWIP_DEBUGF(SOCKETS_DEBUG, ("get_socket(%d): invalid\n", fd));
480 }
481 set_errno(EBADF);
482 return NULL;
483 }
484 return sock;
485 }
486
487 /**
488 * Allocate a new socket for a given netconn.
489 *
490 * @param newconn the netconn for which to allocate a socket
491 * @param accepted 1 if socket has been created by accept(),
492 * 0 if socket has been created by socket()
493 * @return the index of the new socket; -1 on error
494 */
495 static int
496 alloc_socket(struct netconn *newconn, int accepted)
497 {
498 int i;
499 SYS_ARCH_DECL_PROTECT(lev);
500 LWIP_UNUSED_ARG(accepted);
501
502 /* allocate a new socket identifier */
503 for (i = 0; i < NUM_SOCKETS; ++i) {
504 /* Protect socket array */
505 SYS_ARCH_PROTECT(lev);
506 if (!sockets[i].conn) {
507 #if LWIP_NETCONN_FULLDUPLEX
508 if (sockets[i].fd_used) {
509 SYS_ARCH_UNPROTECT(lev);
510 continue;
511 }
512 sockets[i].fd_used = 1;
513 sockets[i].fd_free_pending = 0;
514 #endif
515 sockets[i].conn = newconn;
516 /* The socket is not yet known to anyone, so no need to protect
517 after having marked it as used. */
518 SYS_ARCH_UNPROTECT(lev);
519 sockets[i].lastdata.pbuf = NULL;
520 #if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
521 LWIP_ASSERT("sockets[i].select_waiting == 0", sockets[i].select_waiting == 0);
522 sockets[i].rcvevent = 0;
523 /* TCP sendbuf is empty, but the socket is not yet writable until connected
524 * (unless it has been created by accept()). */
525 sockets[i].sendevent = (NETCONNTYPE_GROUP(newconn->type) == NETCONN_TCP ? (accepted != 0) : 1);
526 sockets[i].errevent = 0;
527 #endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */
528 return i + LWIP_SOCKET_OFFSET;
529 }
530 SYS_ARCH_UNPROTECT(lev);
531 }
532 return -1;
533 }
534
535 /** Free a socket (under lock)
536 *
537 * @param sock the socket to free
538 * @param is_tcp != 0 for TCP sockets, used to free lastdata
539 * @param conn the socket's netconn is stored here, must be freed externally
540 * @param lastdata lastdata is stored here, must be freed externally
541 */
542 static int
543 free_socket_locked(struct lwip_sock *sock, int is_tcp, struct netconn **conn,
544 union lwip_sock_lastdata *lastdata)
545 {
546 #if LWIP_NETCONN_FULLDUPLEX
547 LWIP_ASSERT("sock->fd_used > 0", sock->fd_used > 0);
548 sock->fd_used--;
549 if (sock->fd_used > 0) {
550 sock->fd_free_pending = LWIP_SOCK_FD_FREE_FREE | (is_tcp ? LWIP_SOCK_FD_FREE_TCP : 0);
551 return 0;
552 }
553 #else /* LWIP_NETCONN_FULLDUPLEX */
554 LWIP_UNUSED_ARG(is_tcp);
555 #endif /* LWIP_NETCONN_FULLDUPLEX */
556
557 *lastdata = sock->lastdata;
558 sock->lastdata.pbuf = NULL;
559 *conn = sock->conn;
560 sock->conn = NULL;
561 return 1;
562 }
563
564 /** Free a socket's leftover members.
565 */
566 static void
567 free_socket_free_elements(int is_tcp, struct netconn *conn, union lwip_sock_lastdata *lastdata)
568 {
569 if (lastdata->pbuf != NULL) {
570 if (is_tcp) {
571 pbuf_free(lastdata->pbuf);
572 } else {
573 netbuf_delete(lastdata->netbuf);
574 }
575 }
576 if (conn != NULL) {
577 /* netconn_prepare_delete() has already been called, here we only free the conn */
578 netconn_delete(conn);
579 }
580 }
581
582 /** Free a socket. The socket's netconn must have been
583 * deleted before!
584 *
585 * @param sock the socket to free
586 * @param is_tcp != 0 for TCP sockets, used to free lastdata
587 */
588 static void
589 free_socket(struct lwip_sock *sock, int is_tcp)
590 {
591 int freed;
592 struct netconn *conn;
593 union lwip_sock_lastdata lastdata;
594 SYS_ARCH_DECL_PROTECT(lev);
595
596 /* Protect socket array */
597 SYS_ARCH_PROTECT(lev);
598
599 freed = free_socket_locked(sock, is_tcp, &conn, &lastdata);
600 SYS_ARCH_UNPROTECT(lev);
601 /* don't use 'sock' after this line, as another task might have allocated it */
602
603 if (freed) {
604 free_socket_free_elements(is_tcp, conn, &lastdata);
605 }
606 }
607
608 /* Below this, the well-known socket functions are implemented.
609 * Use google.com or opengroup.org to get a good description :-)
610 *
611 * Exceptions are documented!
612 */
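/* Illustrative sketch (not part of this file): a minimal blocking TCP echo server
   built on the functions below might look like this (IPv4 only, error handling
   shortened):

     int srv = lwip_socket(AF_INET, SOCK_STREAM, 0);
     struct sockaddr_in a;
     memset(&a, 0, sizeof(a));
     a.sin_len = sizeof(a);
     a.sin_family = AF_INET;
     a.sin_port = lwip_htons(7);
     a.sin_addr.s_addr = INADDR_ANY;
     lwip_bind(srv, (struct sockaddr *)&a, sizeof(a));
     lwip_listen(srv, 1);
     for (;;) {
       int c = lwip_accept(srv, NULL, NULL);
       char buf[64];
       ssize_t n;
       while ((n = lwip_recv(c, buf, sizeof(buf), 0)) > 0) {
         lwip_send(c, buf, (size_t)n, 0);
       }
       lwip_close(c);
     }
 */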
613
614 int
615 lwip_accept(int s, struct sockaddr *addr, socklen_t *addrlen)
616 {
617 struct lwip_sock *sock, *nsock;
618 struct netconn *newconn;
619 ip_addr_t naddr;
620 u16_t port = 0;
621 int newsock;
622 err_t err;
623 int recvevent;
624 SYS_ARCH_DECL_PROTECT(lev);
625
626 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d)...\n", s));
627 sock = get_socket(s);
628 if (!sock) {
629 return -1;
630 }
631
632 /* wait for a new connection */
633 err = netconn_accept(sock->conn, &newconn);
634 if (err != ERR_OK) {
635 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d): netconn_accept failed, err=%d\n", s, err));
636 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
637 set_errno(EOPNOTSUPP);
638 } else if (err == ERR_CLSD) {
639 set_errno(EINVAL);
640 } else {
641 set_errno(err_to_errno(err));
642 }
643 done_socket(sock);
644 return -1;
645 }
646 LWIP_ASSERT("newconn != NULL", newconn != NULL);
647
648 newsock = alloc_socket(newconn, 1);
649 if (newsock == -1) {
650 netconn_delete(newconn);
651 set_errno(ENFILE);
652 done_socket(sock);
653 return -1;
654 }
655 LWIP_ASSERT("invalid socket index", (newsock >= LWIP_SOCKET_OFFSET) && (newsock < NUM_SOCKETS + LWIP_SOCKET_OFFSET));
656 nsock = &sockets[newsock - LWIP_SOCKET_OFFSET];
657
658 /* See event_callback: data may arrive right after an accept, even before the
659 * server task has created the new socket. In that case event_callback counts
660 * newconn->socket down (newconn->socket--), so the number of pending receive
661 * events can be recovered here and nsock->rcvevent ends up >= 1!
662 */
663 SYS_ARCH_PROTECT(lev);
664 recvevent = (s16_t)(-1 - newconn->socket);
665 newconn->socket = newsock;
666 SYS_ARCH_UNPROTECT(lev);
667
668 if (newconn->callback) {
669 LOCK_TCPIP_CORE();
670 while (recvevent > 0) {
671 recvevent--;
672 newconn->callback(newconn, NETCONN_EVT_RCVPLUS, 0);
673 }
674 UNLOCK_TCPIP_CORE();
675 }
676
677 /* Note that POSIX only requires us to check addr is non-NULL. addrlen must
678 * not be NULL if addr is valid.
679 */
680 if ((addr != NULL) && (addrlen != NULL)) {
681 union sockaddr_aligned tempaddr;
682 /* get the IP address and port of the remote host */
683 err = netconn_peer(newconn, &naddr, &port);
684 if (err != ERR_OK) {
685 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d): netconn_peer failed, err=%d\n", s, err));
686 free_socket(nsock, 1);
687 set_errno(err_to_errno(err));
688 done_socket(sock);
689 return -1;
690 }
691
692 IPADDR_PORT_TO_SOCKADDR(&tempaddr, &naddr, port);
693 if (*addrlen > tempaddr.sa.sa_len) {
694 *addrlen = tempaddr.sa.sa_len;
695 }
696 MEMCPY(addr, &tempaddr, *addrlen);
697
698 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d) returning new sock=%d addr=", s, newsock));
699 ip_addr_debug_print_val(SOCKETS_DEBUG, naddr);
700 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F"\n", port));
701 } else {
702 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_accept(%d) returning new sock=%d", s, newsock));
703 }
704
705 set_errno(0);
706 done_socket(sock);
707 done_socket(nsock);
708 return newsock;
709 }
710
711 int
712 lwip_bind(int s, const struct sockaddr *name, socklen_t namelen)
713 {
714 struct lwip_sock *sock;
715 ip_addr_t local_addr;
716 u16_t local_port;
717 err_t err;
718
719 sock = get_socket(s);
720 if (!sock) {
721 return -1;
722 }
723
724 if (!SOCK_ADDR_TYPE_MATCH(name, sock)) {
725 /* sockaddr does not match socket type (IPv4/IPv6) */
726 set_errno(err_to_errno(ERR_VAL));
727 done_socket(sock);
728 return -1;
729 }
730
731 /* check size, family and alignment of 'name' */
732 LWIP_ERROR("lwip_bind: invalid address", (IS_SOCK_ADDR_LEN_VALID(namelen) &&
733 IS_SOCK_ADDR_TYPE_VALID(name) && IS_SOCK_ADDR_ALIGNED(name)),
734 set_errno(err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
735 LWIP_UNUSED_ARG(namelen);
736
737 SOCKADDR_TO_IPADDR_PORT(name, &local_addr, local_port);
738 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d, addr=", s));
739 ip_addr_debug_print_val(SOCKETS_DEBUG, local_addr);
740 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", local_port));
741
742 #if LWIP_IPV4 && LWIP_IPV6
743 /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */
744 if (IP_IS_V6_VAL(local_addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&local_addr))) {
745 unmap_ipv4_mapped_ipv6(ip_2_ip4(&local_addr), ip_2_ip6(&local_addr));
746 IP_SET_TYPE_VAL(local_addr, IPADDR_TYPE_V4);
747 }
748 #endif /* LWIP_IPV4 && LWIP_IPV6 */
749
750 err = netconn_bind(sock->conn, &local_addr, local_port);
751
752 if (err != ERR_OK) {
753 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d) failed, err=%d\n", s, err));
754 set_errno(err_to_errno(err));
755 done_socket(sock);
756 return -1;
757 }
758
759 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_bind(%d) succeeded\n", s));
760 set_errno(0);
761 done_socket(sock);
762 return 0;
763 }
764
765 int
766 lwip_close(int s)
767 {
768 struct lwip_sock *sock;
769 int is_tcp = 0;
770 err_t err;
771
772 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_close(%d)\n", s));
773
774 sock = get_socket(s);
775 if (!sock) {
776 return -1;
777 }
778
779 if (sock->conn != NULL) {
780 is_tcp = NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP;
781 } else {
782 LWIP_ASSERT("sock->lastdata == NULL", sock->lastdata.pbuf == NULL);
783 }
784
785 #if LWIP_IGMP
786 /* drop all possibly joined IGMP memberships */
787 lwip_socket_drop_registered_memberships(s);
788 #endif /* LWIP_IGMP */
789 #if LWIP_IPV6_MLD
790 /* drop all possibly joined MLD6 memberships */
791 lwip_socket_drop_registered_mld6_memberships(s);
792 #endif /* LWIP_IPV6_MLD */
793
794 err = netconn_prepare_delete(sock->conn);
795 if (err != ERR_OK) {
796 set_errno(err_to_errno(err));
797 done_socket(sock);
798 return -1;
799 }
800
801 free_socket(sock, is_tcp);
802 set_errno(0);
803 return 0;
804 }
805
806 int
807 lwip_connect(int s, const struct sockaddr *name, socklen_t namelen)
808 {
809 struct lwip_sock *sock;
810 err_t err;
811
812 sock = get_socket(s);
813 if (!sock) {
814 return -1;
815 }
816
817 if (!SOCK_ADDR_TYPE_MATCH_OR_UNSPEC(name, sock)) {
818 /* sockaddr does not match socket type (IPv4/IPv6) */
819 set_errno(err_to_errno(ERR_VAL));
820 done_socket(sock);
821 return -1;
822 }
823
824 LWIP_UNUSED_ARG(namelen);
825 if (name->sa_family == AF_UNSPEC) {
826 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d, AF_UNSPEC)\n", s));
827 err = netconn_disconnect(sock->conn);
828 } else {
829 ip_addr_t remote_addr;
830 u16_t remote_port;
831
832 /* check size, family and alignment of 'name' */
833 LWIP_ERROR("lwip_connect: invalid address", IS_SOCK_ADDR_LEN_VALID(namelen) &&
834 IS_SOCK_ADDR_TYPE_VALID_OR_UNSPEC(name) && IS_SOCK_ADDR_ALIGNED(name),
835 set_errno(err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
836
837 SOCKADDR_TO_IPADDR_PORT(name, &remote_addr, remote_port);
838 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d, addr=", s));
839 ip_addr_debug_print_val(SOCKETS_DEBUG, remote_addr);
840 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", remote_port));
841
842 #if LWIP_IPV4 && LWIP_IPV6
843 /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */
844 if (IP_IS_V6_VAL(remote_addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&remote_addr))) {
845 unmap_ipv4_mapped_ipv6(ip_2_ip4(&remote_addr), ip_2_ip6(&remote_addr));
846 IP_SET_TYPE_VAL(remote_addr, IPADDR_TYPE_V4);
847 }
848 #endif /* LWIP_IPV4 && LWIP_IPV6 */
849
850 err = netconn_connect(sock->conn, &remote_addr, remote_port);
851 }
852
853 if (err != ERR_OK) {
854 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d) failed, err=%d\n", s, err));
855 set_errno(err_to_errno(err));
856 done_socket(sock);
857 return -1;
858 }
859
860 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_connect(%d) succeeded\n", s));
861 set_errno(0);
862 done_socket(sock);
863 return 0;
864 }
865
866 /**
867 * Set a socket into listen mode.
868 * The socket may not have been used for another connection previously.
869 *
870 * @param s the socket to set to listening mode
871 * @param backlog (ATTENTION: needs TCP_LISTEN_BACKLOG=1)
872 * @return 0 on success, non-zero on failure
873 */
874 int
875 lwip_listen(int s, int backlog)
876 {
877 struct lwip_sock *sock;
878 err_t err;
879
880 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_listen(%d, backlog=%d)\n", s, backlog));
881
882 sock = get_socket(s);
883 if (!sock) {
884 return -1;
885 }
886
887 /* limit the "backlog" parameter to fit in an u8_t */
888 backlog = LWIP_MIN(LWIP_MAX(backlog, 0), 0xff);
889
890 err = netconn_listen_with_backlog(sock->conn, (u8_t)backlog);
891
892 if (err != ERR_OK) {
893 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_listen(%d) failed, err=%d\n", s, err));
894 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
895 set_errno(EOPNOTSUPP);
896 } else {
897 set_errno(err_to_errno(err));
898 }
899 done_socket(sock);
900 return -1;
901 }
902
903 set_errno(0);
904 done_socket(sock);
905 return 0;
906 }
907
908 #if LWIP_TCP
909 /* Helper function to loop over receiving pbufs from netconn
910 * until "len" bytes are received or we're otherwise done.
911 * Keeps sock->lastdata for peeking or partly copying.
912 */
913 static ssize_t
914 lwip_recv_tcp(struct lwip_sock *sock, void *mem, size_t len, int flags)
915 {
916 u8_t apiflags = NETCONN_NOAUTORCVD;
917 ssize_t recvd = 0;
918 ssize_t recv_left = (len <= SSIZE_MAX) ? (ssize_t)len : SSIZE_MAX;
919
920 LWIP_ASSERT("no socket given", sock != NULL);
921 LWIP_ASSERT("this should be checked internally", NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP);
922
923 if (flags & MSG_DONTWAIT) {
924 apiflags |= NETCONN_DONTBLOCK;
925 }
926
927 do {
928 struct pbuf *p;
929 err_t err;
930 u16_t copylen;
931
932 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: top while sock->lastdata=%p\n", (void *)sock->lastdata.pbuf));
933 /* Check if there is data left from the last recv operation. */
934 if (sock->lastdata.pbuf) {
935 p = sock->lastdata.pbuf;
936 } else {
937 /* No data was left from the previous operation, so we try to get
938 some from the network. */
939 err = netconn_recv_tcp_pbuf_flags(sock->conn, &p, apiflags);
940 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: netconn_recv err=%d, pbuf=%p\n",
941 err, (void *)p));
942
943 if (err != ERR_OK) {
944 if (recvd > 0) {
945 /* already received data, return that (this trusts in getting the same error from
946 netconn layer again next time netconn_recv is called) */
947 goto lwip_recv_tcp_done;
948 }
949 /* We should really do some error checking here. */
950 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: p == NULL, error is \"%s\"!\n",
951 lwip_strerr(err)));
952 set_errno(err_to_errno(err));
953 if (err == ERR_CLSD) {
954 return 0;
955 } else {
956 return -1;
957 }
958 }
959 LWIP_ASSERT("p != NULL", p != NULL);
960 sock->lastdata.pbuf = p;
961 }
962
963 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: buflen=%"U16_F" recv_left=%d off=%d\n",
964 p->tot_len, (int)recv_left, (int)recvd));
965
966 if (recv_left > p->tot_len) {
967 copylen = p->tot_len;
968 } else {
969 copylen = (u16_t)recv_left;
970 }
971 if (recvd + copylen < recvd) {
972 /* overflow */
973 copylen = (u16_t)(SSIZE_MAX - recvd);
974 }
975
976 /* copy the contents of the received buffer into
977 the supplied memory pointer mem */
978 pbuf_copy_partial(p, (u8_t *)mem + recvd, copylen, 0);
979
980 recvd += copylen;
981
982 /* TCP combines multiple pbufs for one recv */
983 LWIP_ASSERT("invalid copylen, len would underflow", recv_left >= copylen);
984 recv_left -= copylen;
985
986 /* Unless we peek the incoming message... */
987 if ((flags & MSG_PEEK) == 0) {
988 /* ... check if there is data left in the pbuf */
989 LWIP_ASSERT("invalid copylen", p->tot_len >= copylen);
990 if (p->tot_len - copylen > 0) {
991 /* If so, it should be saved in the sock structure for the next recv call.
992 We store the pbuf but hide/free the consumed data: */
993 sock->lastdata.pbuf = pbuf_free_header(p, copylen);
994 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: lastdata now pbuf=%p\n", (void *)sock->lastdata.pbuf));
995 } else {
996 sock->lastdata.pbuf = NULL;
997 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recv_tcp: deleting pbuf=%p\n", (void *)p));
998 pbuf_free(p);
999 }
1000 }
1001 /* once we have some data to return, only add more if we don't need to wait */
1002 apiflags |= NETCONN_DONTBLOCK | NETCONN_NOFIN;
1003 /* @todo: do we need to support peeking more than one pbuf? */
1004 } while ((recv_left > 0) && !(flags & MSG_PEEK));
1005 lwip_recv_tcp_done:
1006 if ((recvd > 0) && !(flags & MSG_PEEK)) {
1007 /* ensure window update after copying all data */
1008 netconn_tcp_recvd(sock->conn, (size_t)recvd);
1009 }
1010 set_errno(0);
1011 return recvd;
1012 }
1013 #endif
1014
1015 /* Convert a netbuf's address data to struct sockaddr */
1016 static int
1017 lwip_sock_make_addr(struct netconn *conn, ip_addr_t *fromaddr, u16_t port,
1018 struct sockaddr *from, socklen_t *fromlen)
1019 {
1020 int truncated = 0;
1021 union sockaddr_aligned saddr;
1022
1023 LWIP_UNUSED_ARG(conn);
1024
1025 LWIP_ASSERT("fromaddr != NULL", fromaddr != NULL);
1026 LWIP_ASSERT("from != NULL", from != NULL);
1027 LWIP_ASSERT("fromlen != NULL", fromlen != NULL);
1028
1029 #if LWIP_IPV4 && LWIP_IPV6
1030 /* Dual-stack: Map IPv4 addresses to IPv4 mapped IPv6 */
1031 if (NETCONNTYPE_ISIPV6(netconn_type(conn)) && IP_IS_V4(fromaddr)) {
1032 ip4_2_ipv4_mapped_ipv6(ip_2_ip6(fromaddr), ip_2_ip4(fromaddr));
1033 IP_SET_TYPE(fromaddr, IPADDR_TYPE_V6);
1034 }
1035 #endif /* LWIP_IPV4 && LWIP_IPV6 */
1036
1037 IPADDR_PORT_TO_SOCKADDR(&saddr, fromaddr, port);
1038 if (*fromlen < saddr.sa.sa_len) {
1039 truncated = 1;
1040 } else if (*fromlen > saddr.sa.sa_len) {
1041 *fromlen = saddr.sa.sa_len;
1042 }
1043 MEMCPY(from, &saddr, *fromlen);
1044 return truncated;
1045 }
1046
1047 #if LWIP_TCP
1048 /* Helper function to get a tcp socket's remote address info */
1049 static int
1050 lwip_recv_tcp_from(struct lwip_sock *sock, struct sockaddr *from, socklen_t *fromlen, const char *dbg_fn, int dbg_s, ssize_t dbg_ret)
1051 {
1052 if (sock == NULL) {
1053 return 0;
1054 }
1055 LWIP_UNUSED_ARG(dbg_fn);
1056 LWIP_UNUSED_ARG(dbg_s);
1057 LWIP_UNUSED_ARG(dbg_ret);
1058
1059 #if !SOCKETS_DEBUG
1060 if (from && fromlen)
1061 #endif /* !SOCKETS_DEBUG */
1062 {
1063 /* get remote addr/port from tcp_pcb */
1064 u16_t port;
1065 ip_addr_t tmpaddr;
1066 netconn_getaddr(sock->conn, &tmpaddr, &port, 0);
1067 LWIP_DEBUGF(SOCKETS_DEBUG, ("%s(%d): addr=", dbg_fn, dbg_s));
1068 ip_addr_debug_print_val(SOCKETS_DEBUG, tmpaddr);
1069 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F" len=%d\n", port, (int)dbg_ret));
1070 if (from && fromlen) {
1071 return lwip_sock_make_addr(sock->conn, &tmpaddr, port, from, fromlen);
1072 }
1073 }
1074 return 0;
1075 }
1076 #endif
1077
1078 /* Helper function to receive a netbuf from a udp or raw netconn.
1079 * Keeps sock->lastdata for peeking.
1080 */
1081 static err_t
1082 lwip_recvfrom_udp_raw(struct lwip_sock *sock, int flags, struct msghdr *msg, u16_t *datagram_len, int dbg_s)
1083 {
1084 struct netbuf *buf;
1085 u8_t apiflags;
1086 err_t err;
1087 u16_t buflen, copylen, copied;
1088 int i;
1089
1090 LWIP_UNUSED_ARG(dbg_s);
1091 LWIP_ERROR("lwip_recvfrom_udp_raw: invalid arguments", (msg->msg_iov != NULL) && (msg->msg_iovlen > 0), return ERR_ARG;);
1092
1093 if (flags & MSG_DONTWAIT) {
1094 apiflags = NETCONN_DONTBLOCK;
1095 } else {
1096 apiflags = 0;
1097 }
1098
1099 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw[UDP/RAW]: top sock->lastdata=%p\n", (void *)sock->lastdata.netbuf));
1100 /* Check if there is data left from the last recv operation. */
1101 buf = sock->lastdata.netbuf;
1102 if (buf == NULL) {
1103 /* No data was left from the previous operation, so we try to get
1104 some from the network. */
1105 err = netconn_recv_udp_raw_netbuf_flags(sock->conn, &buf, apiflags);
1106 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw[UDP/RAW]: netconn_recv err=%d, netbuf=%p\n",
1107 err, (void *)buf));
1108
1109 if (err != ERR_OK) {
1110 return err;
1111 }
1112 LWIP_ASSERT("buf != NULL", buf != NULL);
1113 sock->lastdata.netbuf = buf;
1114 }
1115 buflen = buf->p->tot_len;
1116 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw: buflen=%"U16_F"\n", buflen));
1117
1118 copied = 0;
1119 /* copy the pbuf payload into the iovs */
1120 for (i = 0; (i < msg->msg_iovlen) && (copied < buflen); i++) {
1121 u16_t len_left = (u16_t)(buflen - copied);
1122 if (msg->msg_iov[i].iov_len > len_left) {
1123 copylen = len_left;
1124 } else {
1125 copylen = (u16_t)msg->msg_iov[i].iov_len;
1126 }
1127
1128 /* copy the contents of the received buffer into
1129 the supplied memory buffer */
1130 pbuf_copy_partial(buf->p, (u8_t *)msg->msg_iov[i].iov_base, copylen, copied);
1131 copied = (u16_t)(copied + copylen);
1132 }
1133
1134 /* Check to see where the data came from. */
1135 #if !SOCKETS_DEBUG
1136 if (msg->msg_name && msg->msg_namelen)
1137 #endif /* !SOCKETS_DEBUG */
1138 {
1139 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom_udp_raw(%d): addr=", dbg_s));
1140 ip_addr_debug_print_val(SOCKETS_DEBUG, *netbuf_fromaddr(buf));
1141 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F" len=%d\n", netbuf_fromport(buf), copied));
1142 if (msg->msg_name && msg->msg_namelen) {
1143 lwip_sock_make_addr(sock->conn, netbuf_fromaddr(buf), netbuf_fromport(buf),
1144 (struct sockaddr *)msg->msg_name, &msg->msg_namelen);
1145 }
1146 }
1147
1148 /* Initialize flag output */
1149 msg->msg_flags = 0;
1150
1151 if (msg->msg_control) {
1152 u8_t wrote_msg = 0;
1153 #if LWIP_NETBUF_RECVINFO
1154 /* Check if packet info was recorded */
1155 if (buf->flags & NETBUF_FLAG_DESTADDR) {
1156 if (IP_IS_V4(&buf->toaddr)) {
1157 #if LWIP_IPV4
1158 if (msg->msg_controllen >= CMSG_SPACE(sizeof(struct in_pktinfo))) {
1159 struct cmsghdr *chdr = CMSG_FIRSTHDR(msg); /* This will always return a header!! */
1160 struct in_pktinfo *pkti = (struct in_pktinfo *)CMSG_DATA(chdr);
1161 chdr->cmsg_level = IPPROTO_IP;
1162 chdr->cmsg_type = IP_PKTINFO;
1163 chdr->cmsg_len = CMSG_LEN(sizeof(struct in_pktinfo));
1164 pkti->ipi_ifindex = buf->p->if_idx;
1165 inet_addr_from_ip4addr(&pkti->ipi_addr, ip_2_ip4(netbuf_destaddr(buf)));
1166 msg->msg_controllen = CMSG_SPACE(sizeof(struct in_pktinfo));
1167 wrote_msg = 1;
1168 } else {
1169 msg->msg_flags |= MSG_CTRUNC;
1170 }
1171 #endif /* LWIP_IPV4 */
1172 }
1173 }
1174 #endif /* LWIP_NETBUF_RECVINFO */
1175
1176 if (!wrote_msg) {
1177 msg->msg_controllen = 0;
1178 }
1179 }
1180
1181 /* If we don't peek the incoming message: zero lastdata pointer and free the netbuf */
1182 if ((flags & MSG_PEEK) == 0) {
1183 sock->lastdata.netbuf = NULL;
1184 netbuf_delete(buf);
1185 }
1186 if (datagram_len) {
1187 *datagram_len = buflen;
1188 }
1189 return ERR_OK;
1190 }
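/* Illustrative sketch (not part of this file): an application that enabled the
   IP_PKTINFO option can read the control data filled in above from the msghdr it
   passed to lwip_recvmsg(), roughly like this ('msg' is that msghdr):

     struct cmsghdr *cmsg;
     for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
       if ((cmsg->cmsg_level == IPPROTO_IP) && (cmsg->cmsg_type == IP_PKTINFO)) {
         struct in_pktinfo *pkti = (struct in_pktinfo *)CMSG_DATA(cmsg);
         // pkti->ipi_addr is the local destination address, pkti->ipi_ifindex the netif index
       }
     }

   This requires LWIP_NETBUF_RECVINFO to be compiled in. */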
1191
1192 ssize_t
1193 lwip_recvfrom(int s, void *mem, size_t len, int flags,
1194 struct sockaddr *from, socklen_t *fromlen)
1195 {
1196 struct lwip_sock *sock;
1197 ssize_t ret;
1198
1199 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom(%d, %p, %"SZT_F", 0x%x, ..)\n", s, mem, len, flags));
1200 sock = get_socket(s);
1201 if (!sock) {
1202 return -1;
1203 }
1204 #if LWIP_TCP
1205 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
1206 ret = lwip_recv_tcp(sock, mem, len, flags);
1207 lwip_recv_tcp_from(sock, from, fromlen, "lwip_recvfrom", s, ret);
1208 done_socket(sock);
1209 return ret;
1210 } else
1211 #endif
1212 {
1213 u16_t datagram_len = 0;
1214 struct iovec vec;
1215 struct msghdr msg;
1216 err_t err;
1217 vec.iov_base = mem;
1218 vec.iov_len = len;
1219 msg.msg_control = NULL;
1220 msg.msg_controllen = 0;
1221 msg.msg_flags = 0;
1222 msg.msg_iov = &vec;
1223 msg.msg_iovlen = 1;
1224 msg.msg_name = from;
1225 msg.msg_namelen = (fromlen ? *fromlen : 0);
1226 err = lwip_recvfrom_udp_raw(sock, flags, &msg, &datagram_len, s);
1227 if (err != ERR_OK) {
1228 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvfrom[UDP/RAW](%d): buf == NULL, error is \"%s\"!\n",
1229 s, lwip_strerr(err)));
1230 set_errno(err_to_errno(err));
1231 done_socket(sock);
1232 return -1;
1233 }
1234 ret = (ssize_t)LWIP_MIN(LWIP_MIN(len, datagram_len), SSIZE_MAX);
1235 if (fromlen) {
1236 *fromlen = msg.msg_namelen;
1237 }
1238 }
1239
1240 set_errno(0);
1241 done_socket(sock);
1242 return ret;
1243 }
1244
1245 ssize_t
1246 lwip_read(int s, void *mem, size_t len)
1247 {
1248 return lwip_recvfrom(s, mem, len, 0, NULL, NULL);
1249 }
1250
1251 ssize_t
1252 lwip_readv(int s, const struct iovec *iov, int iovcnt)
1253 {
1254 struct msghdr msg;
1255
1256 msg.msg_name = NULL;
1257 msg.msg_namelen = 0;
1258 /* Hack: we have to cast via number to cast from 'const' pointer to non-const.
1259 Blame the opengroup standard for this inconsistency. */
1260 msg.msg_iov = LWIP_CONST_CAST(struct iovec *, iov);
1261 msg.msg_iovlen = iovcnt;
1262 msg.msg_control = NULL;
1263 msg.msg_controllen = 0;
1264 msg.msg_flags = 0;
1265 return lwip_recvmsg(s, &msg, 0);
1266 }
1267
1268 ssize_t
1269 lwip_recv(int s, void *mem, size_t len, int flags)
1270 {
1271 return lwip_recvfrom(s, mem, len, flags, NULL, NULL);
1272 }
1273
1274 ssize_t
1275 lwip_recvmsg(int s, struct msghdr *message, int flags)
1276 {
1277 struct lwip_sock *sock;
1278 int i;
1279 ssize_t buflen;
1280
1281 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvmsg(%d, message=%p, flags=0x%x)\n", s, (void *)message, flags));
1282 LWIP_ERROR("lwip_recvmsg: invalid message pointer", message != NULL, return ERR_ARG;);
1283 LWIP_ERROR("lwip_recvmsg: unsupported flags", (flags & ~(MSG_PEEK|MSG_DONTWAIT)) == 0,
1284 set_errno(EOPNOTSUPP); return -1;);
1285
1286 if ((message->msg_iovlen <= 0) || (message->msg_iovlen > IOV_MAX)) {
1287 set_errno(EMSGSIZE);
1288 return -1;
1289 }
1290
1291 sock = get_socket(s);
1292 if (!sock) {
1293 return -1;
1294 }
1295
1296 /* check for valid vectors */
1297 buflen = 0;
1298 for (i = 0; i < message->msg_iovlen; i++) {
1299 if ((message->msg_iov[i].iov_base == NULL) || ((ssize_t)message->msg_iov[i].iov_len <= 0) ||
1300 ((size_t)(ssize_t)message->msg_iov[i].iov_len != message->msg_iov[i].iov_len) ||
1301 ((ssize_t)(buflen + (ssize_t)message->msg_iov[i].iov_len) <= 0)) {
1302 set_errno(err_to_errno(ERR_VAL));
1303 done_socket(sock);
1304 return -1;
1305 }
1306 buflen = (ssize_t)(buflen + (ssize_t)message->msg_iov[i].iov_len);
1307 }
1308
1309 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
1310 #if LWIP_TCP
1311 int recv_flags = flags;
1312 message->msg_flags = 0;
1313 /* recv the data */
1314 buflen = 0;
1315 for (i = 0; i < message->msg_iovlen; i++) {
1316 /* try to receive into this vector's buffer */
1317 ssize_t recvd_local = lwip_recv_tcp(sock, message->msg_iov[i].iov_base, message->msg_iov[i].iov_len, recv_flags);
1318 if (recvd_local > 0) {
1319 /* sum up received bytes */
1320 buflen += recvd_local;
1321 }
1322 if ((recvd_local < 0) || (recvd_local < (int)message->msg_iov[i].iov_len) ||
1323 (flags & MSG_PEEK)) {
1324 /* returned prematurely (or peeking, which might actually be limited to the first iov) */
1325 if (buflen <= 0) {
1326 /* nothing received at all, propagate the error */
1327 buflen = recvd_local;
1328 }
1329 break;
1330 }
1331 /* pass MSG_DONTWAIT to lwip_recv_tcp() to prevent waiting for more data */
1332 recv_flags |= MSG_DONTWAIT;
1333 }
1334 if (buflen > 0) {
1335 /* reset socket error since we have received something */
1336 set_errno(0);
1337 }
1338 /* " If the socket is connected, the msg_name and msg_namelen members shall be ignored." */
1339 done_socket(sock);
1340 return buflen;
1341 #else /* LWIP_TCP */
1342 set_errno(err_to_errno(ERR_ARG));
1343 done_socket(sock);
1344 return -1;
1345 #endif /* LWIP_TCP */
1346 }
1347 /* else, UDP and RAW NETCONNs */
1348 #if LWIP_UDP || LWIP_RAW
1349 {
1350 u16_t datagram_len = 0;
1351 err_t err;
1352 err = lwip_recvfrom_udp_raw(sock, flags, message, &datagram_len, s);
1353 if (err != ERR_OK) {
1354 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_recvmsg[UDP/RAW](%d): buf == NULL, error is \"%s\"!\n",
1355 s, lwip_strerr(err)));
1356 set_errno(err_to_errno(err));
1357 done_socket(sock);
1358 return -1;
1359 }
1360 if (datagram_len > buflen) {
1361 message->msg_flags |= MSG_TRUNC;
1362 }
1363
1364 set_errno(0);
1365 done_socket(sock);
1366 return (int)datagram_len;
1367 }
1368 #else /* LWIP_UDP || LWIP_RAW */
1369 set_errno(err_to_errno(ERR_ARG));
1370 done_socket(sock);
1371 return -1;
1372 #endif /* LWIP_UDP || LWIP_RAW */
1373 }
1374
1375 ssize_t
1376 lwip_send(int s, const void *data, size_t size, int flags)
1377 {
1378 struct lwip_sock *sock;
1379 err_t err;
1380 u8_t write_flags;
1381 size_t written;
1382
1383 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_send(%d, data=%p, size=%"SZT_F", flags=0x%x)\n",
1384 s, data, size, flags));
1385
1386 sock = get_socket(s);
1387 if (!sock) {
1388 return -1;
1389 }
1390
1391 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
1392 #if (LWIP_UDP || LWIP_RAW)
1393 done_socket(sock);
1394 return lwip_sendto(s, data, size, flags, NULL, 0);
1395 #else /* (LWIP_UDP || LWIP_RAW) */
1396 set_errno(err_to_errno(ERR_ARG));
1397 done_socket(sock);
1398 return -1;
1399 #endif /* (LWIP_UDP || LWIP_RAW) */
1400 }
1401
1402 write_flags = (u8_t)(NETCONN_COPY |
1403 ((flags & MSG_MORE) ? NETCONN_MORE : 0) |
1404 ((flags & MSG_DONTWAIT) ? NETCONN_DONTBLOCK : 0));
1405 written = 0;
1406 err = netconn_write_partly(sock->conn, data, size, write_flags, &written);
1407
1408 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_send(%d) err=%d written=%"SZT_F"\n", s, err, written));
1409 set_errno(err_to_errno(err));
1410 done_socket(sock);
1411 /* casting 'written' to ssize_t is OK here since the netconn API limits it to SSIZE_MAX */
1412 return (err == ERR_OK ? (ssize_t)written : -1);
1413 }
1414
1415 ssize_t
1416 lwip_sendmsg(int s, const struct msghdr *msg, int flags)
1417 {
1418 struct lwip_sock *sock;
1419 #if LWIP_TCP
1420 u8_t write_flags;
1421 size_t written;
1422 #endif
1423 err_t err = ERR_OK;
1424
1425 sock = get_socket(s);
1426 if (!sock) {
1427 return -1;
1428 }
1429
1430 LWIP_ERROR("lwip_sendmsg: invalid msghdr", msg != NULL,
1431 set_errno(err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
1432 LWIP_ERROR("lwip_sendmsg: invalid msghdr iov", msg->msg_iov != NULL,
1433 set_errno(err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
1434 LWIP_ERROR("lwip_sendmsg: maximum iovs exceeded", (msg->msg_iovlen > 0) && (msg->msg_iovlen <= IOV_MAX),
1435 set_errno(EMSGSIZE); done_socket(sock); return -1;);
1436 LWIP_ERROR("lwip_sendmsg: unsupported flags", (flags & ~(MSG_DONTWAIT | MSG_MORE)) == 0,
1437 set_errno(EOPNOTSUPP); done_socket(sock); return -1;);
1438
1439 LWIP_UNUSED_ARG(msg->msg_control);
1440 LWIP_UNUSED_ARG(msg->msg_controllen);
1441 LWIP_UNUSED_ARG(msg->msg_flags);
1442
1443 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
1444 #if LWIP_TCP
1445 write_flags = (u8_t)(NETCONN_COPY |
1446 ((flags & MSG_MORE) ? NETCONN_MORE : 0) |
1447 ((flags & MSG_DONTWAIT) ? NETCONN_DONTBLOCK : 0));
1448
1449 written = 0;
1450 err = netconn_write_vectors_partly(sock->conn, (struct netvector *)msg->msg_iov, (u16_t)msg->msg_iovlen, write_flags, &written);
1451 set_errno(err_to_errno(err));
1452 done_socket(sock);
1453 /* casting 'written' to ssize_t is OK here since the netconn API limits it to SSIZE_MAX */
1454 return (err == ERR_OK ? (ssize_t)written : -1);
1455 #else /* LWIP_TCP */
1456 set_errno(err_to_errno(ERR_ARG));
1457 done_socket(sock);
1458 return -1;
1459 #endif /* LWIP_TCP */
1460 }
1461 /* else, UDP and RAW NETCONNs */
1462 #if LWIP_UDP || LWIP_RAW
1463 {
1464 struct netbuf chain_buf;
1465 int i;
1466 ssize_t size = 0;
1467
1468 LWIP_UNUSED_ARG(flags);
1469 LWIP_ERROR("lwip_sendmsg: invalid msghdr name", (((msg->msg_name == NULL) && (msg->msg_namelen == 0)) ||
1470 IS_SOCK_ADDR_LEN_VALID(msg->msg_namelen)),
1471 set_errno(err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
1472
1473 /* initialize chain buffer with destination */
1474 memset(&chain_buf, 0, sizeof(struct netbuf));
1475 if (msg->msg_name) {
1476 u16_t remote_port;
1477 SOCKADDR_TO_IPADDR_PORT((const struct sockaddr *)msg->msg_name, &chain_buf.addr, remote_port);
1478 netbuf_fromport(&chain_buf) = remote_port;
1479 }
1480 #if LWIP_NETIF_TX_SINGLE_PBUF
1481 for (i = 0; i < msg->msg_iovlen; i++) {
1482 size += msg->msg_iov[i].iov_len;
1483 if ((msg->msg_iov[i].iov_len > INT_MAX) || (size < (int)msg->msg_iov[i].iov_len)) {
1484 /* overflow */
1485 goto sendmsg_emsgsize;
1486 }
1487 }
1488 if (size > 0xFFFF) {
1489 /* overflow */
1490 goto sendmsg_emsgsize;
1491 }
1492 /* Allocate a new netbuf and copy the data into it. */
1493 if (netbuf_alloc(&chain_buf, (u16_t)size) == NULL) {
1494 err = ERR_MEM;
1495 } else {
1496 /* flatten the IO vectors */
1497 size_t offset = 0;
1498 for (i = 0; i < msg->msg_iovlen; i++) {
1499 MEMCPY(&((u8_t *)chain_buf.p->payload)[offset], msg->msg_iov[i].iov_base, msg->msg_iov[i].iov_len);
1500 offset += msg->msg_iov[i].iov_len;
1501 }
1502 #if LWIP_CHECKSUM_ON_COPY
1503 {
1504 /* This can be improved by using LWIP_CHKSUM_COPY() and aggregating the checksum for each IO vector */
1505 u16_t chksum = ~inet_chksum_pbuf(chain_buf.p);
1506 netbuf_set_chksum(&chain_buf, chksum);
1507 }
1508 #endif /* LWIP_CHECKSUM_ON_COPY */
1509 err = ERR_OK;
1510 }
1511 #else /* LWIP_NETIF_TX_SINGLE_PBUF */
1512 /* create a chained netbuf from the IO vectors. NOTE: we assemble a pbuf chain
1513 manually to avoid having to allocate, chain, and delete a netbuf for each iov */
1514 for (i = 0; i < msg->msg_iovlen; i++) {
1515 struct pbuf *p;
1516 if (msg->msg_iov[i].iov_len > 0xFFFF) {
1517 /* overflow */
1518 goto sendmsg_emsgsize;
1519 }
1520 p = pbuf_alloc(PBUF_TRANSPORT, 0, PBUF_REF);
1521 if (p == NULL) {
1522 err = ERR_MEM; /* let netbuf_delete() cleanup chain_buf */
1523 break;
1524 }
1525 p->payload = msg->msg_iov[i].iov_base;
1526 p->len = p->tot_len = (u16_t)msg->msg_iov[i].iov_len;
1527 /* netbuf empty, add new pbuf */
1528 if (chain_buf.p == NULL) {
1529 chain_buf.p = chain_buf.ptr = p;
1530 /* add pbuf to existing pbuf chain */
1531 } else {
1532 if (chain_buf.p->tot_len + p->len > 0xffff) {
1533 /* overflow */
1534 pbuf_free(p);
1535 goto sendmsg_emsgsize;
1536 }
1537 pbuf_cat(chain_buf.p, p);
1538 }
1539 }
1540 /* save size of total chain */
1541 if (err == ERR_OK) {
1542 size = netbuf_len(&chain_buf);
1543 }
1544 #endif /* LWIP_NETIF_TX_SINGLE_PBUF */
1545
1546 if (err == ERR_OK) {
1547 #if LWIP_IPV4 && LWIP_IPV6
1548 /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */
1549 if (IP_IS_V6_VAL(chain_buf.addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&chain_buf.addr))) {
1550 unmap_ipv4_mapped_ipv6(ip_2_ip4(&chain_buf.addr), ip_2_ip6(&chain_buf.addr));
1551 IP_SET_TYPE_VAL(chain_buf.addr, IPADDR_TYPE_V4);
1552 }
1553 #endif /* LWIP_IPV4 && LWIP_IPV6 */
1554
1555 /* send the data */
1556 err = netconn_send(sock->conn, &chain_buf);
1557 }
1558
1559 /* deallocate the buffer */
1560 netbuf_free(&chain_buf);
1561
1562 set_errno(err_to_errno(err));
1563 done_socket(sock);
1564 return (err == ERR_OK ? size : -1);
1565 sendmsg_emsgsize:
1566 set_errno(EMSGSIZE);
1567 netbuf_free(&chain_buf);
1568 done_socket(sock);
1569 return -1;
1570 }
1571 #else /* LWIP_UDP || LWIP_RAW */
1572 set_errno(err_to_errno(ERR_ARG));
1573 done_socket(sock);
1574 return -1;
1575 #endif /* LWIP_UDP || LWIP_RAW */
1576 }
1577
1578 ssize_t
1579 lwip_sendto(int s, const void *data, size_t size, int flags,
1580 const struct sockaddr *to, socklen_t tolen)
1581 {
1582 struct lwip_sock *sock;
1583 err_t err;
1584 u16_t short_size;
1585 u16_t remote_port;
1586 struct netbuf buf;
1587
1588 sock = get_socket(s);
1589 if (!sock) {
1590 return -1;
1591 }
1592
1593 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
1594 #if LWIP_TCP
1595 done_socket(sock);
1596 return lwip_send(s, data, size, flags);
1597 #else /* LWIP_TCP */
1598 LWIP_UNUSED_ARG(flags);
1599 set_errno(err_to_errno(ERR_ARG));
1600 done_socket(sock);
1601 return -1;
1602 #endif /* LWIP_TCP */
1603 }
1604
1605 if (size > LWIP_MIN(0xFFFF, SSIZE_MAX)) {
1606 /* cannot fit into one datagram (at least for us) */
1607 set_errno(EMSGSIZE);
1608 done_socket(sock);
1609 return -1;
1610 }
1611 short_size = (u16_t)size;
1612 LWIP_ERROR("lwip_sendto: invalid address", (((to == NULL) && (tolen == 0)) ||
1613 (IS_SOCK_ADDR_LEN_VALID(tolen) &&
1614 ((to != NULL) && (IS_SOCK_ADDR_TYPE_VALID(to) && IS_SOCK_ADDR_ALIGNED(to))))),
1615 set_errno(err_to_errno(ERR_ARG)); done_socket(sock); return -1;);
1616 LWIP_UNUSED_ARG(tolen);
1617
1618 /* initialize a buffer */
1619 buf.p = buf.ptr = NULL;
1620 #if LWIP_CHECKSUM_ON_COPY
1621 buf.flags = 0;
1622 #endif /* LWIP_CHECKSUM_ON_COPY */
1623 if (to) {
1624 SOCKADDR_TO_IPADDR_PORT(to, &buf.addr, remote_port);
1625 } else {
1626 remote_port = 0;
1627 ip_addr_set_any(NETCONNTYPE_ISIPV6(netconn_type(sock->conn)), &buf.addr);
1628 }
1629 netbuf_fromport(&buf) = remote_port;
1630
1631
1632 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_sendto(%d, data=%p, short_size=%"U16_F", flags=0x%x to=",
1633 s, data, short_size, flags));
1634 ip_addr_debug_print_val(SOCKETS_DEBUG, buf.addr);
1635 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F"\n", remote_port));
1636
1637 /* make the buffer point to the data that should be sent */
1638 #if LWIP_NETIF_TX_SINGLE_PBUF
1639 /* Allocate a new netbuf and copy the data into it. */
1640 if (netbuf_alloc(&buf, short_size) == NULL) {
1641 err = ERR_MEM;
1642 } else {
1643 #if LWIP_CHECKSUM_ON_COPY
1644 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_RAW) {
1645 u16_t chksum = LWIP_CHKSUM_COPY(buf.p->payload, data, short_size);
1646 netbuf_set_chksum(&buf, chksum);
1647 } else
1648 #endif /* LWIP_CHECKSUM_ON_COPY */
1649 {
1650 MEMCPY(buf.p->payload, data, short_size);
1651 }
1652 err = ERR_OK;
1653 }
1654 #else /* LWIP_NETIF_TX_SINGLE_PBUF */
1655 err = netbuf_ref(&buf, data, short_size);
1656 #endif /* LWIP_NETIF_TX_SINGLE_PBUF */
1657 if (err == ERR_OK) {
1658 #if LWIP_IPV4 && LWIP_IPV6
1659 /* Dual-stack: Unmap IPv4 mapped IPv6 addresses */
1660 if (IP_IS_V6_VAL(buf.addr) && ip6_addr_isipv4mappedipv6(ip_2_ip6(&buf.addr))) {
1661 unmap_ipv4_mapped_ipv6(ip_2_ip4(&buf.addr), ip_2_ip6(&buf.addr));
1662 IP_SET_TYPE_VAL(buf.addr, IPADDR_TYPE_V4);
1663 }
1664 #endif /* LWIP_IPV4 && LWIP_IPV6 */
1665
1666 /* send the data */
1667 err = netconn_send(sock->conn, &buf);
1668 }
1669
1670 /* deallocate the buffer */
1671 netbuf_free(&buf);
1672
1673 set_errno(err_to_errno(err));
1674 done_socket(sock);
1675 return (err == ERR_OK ? short_size : -1);
1676 }
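/* Illustrative usage sketch (not part of lwIP): sending a single UDP datagram
 * with lwip_sendto(). 'udp_sock' and the destination are placeholder
 * assumptions; assumes LWIP_IPV4 and LWIP_UDP. On success the full length is
 * returned, otherwise -1 with errno set (EMSGSIZE for oversized payloads).
 *
 *   const char msg[] = "hello";
 *   struct sockaddr_in dst;
 *   memset(&dst, 0, sizeof(dst));
 *   dst.sin_family = AF_INET;
 *   dst.sin_port = lwip_htons(5000);
 *   dst.sin_addr.s_addr = inet_addr("192.0.2.1");
 *   if (lwip_sendto(udp_sock, msg, sizeof(msg), 0,
 *                   (struct sockaddr *)&dst, sizeof(dst)) < 0) {
 *     ... inspect errno ...
 *   }
 */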
1677
1678 int
1679 lwip_socket(int domain, int type, int protocol)
1680 {
1681 struct netconn *conn;
1682 int i;
1683
1684 LWIP_UNUSED_ARG(domain); /* @todo: check this */
1685
1686 /* create a netconn */
1687 switch (type) {
1688 case SOCK_RAW:
1689 conn = netconn_new_with_proto_and_callback(DOMAIN_TO_NETCONN_TYPE(domain, NETCONN_RAW),
1690 (u8_t)protocol, DEFAULT_SOCKET_EVENTCB);
1691 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_RAW, %d) = ",
1692 domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol));
1693 break;
1694 case SOCK_DGRAM:
1695 conn = netconn_new_with_callback(DOMAIN_TO_NETCONN_TYPE(domain,
1696 ((protocol == IPPROTO_UDPLITE) ? NETCONN_UDPLITE : NETCONN_UDP)),
1697 DEFAULT_SOCKET_EVENTCB);
1698 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_DGRAM, %d) = ",
1699 domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol));
1700 #if LWIP_NETBUF_RECVINFO
1701 if (conn) {
1702 /* netconn layer enables pktinfo by default, sockets default to off */
1703 conn->flags &= ~NETCONN_FLAG_PKTINFO;
1704 }
1705 #endif /* LWIP_NETBUF_RECVINFO */
1706 break;
1707 case SOCK_STREAM:
1708 conn = netconn_new_with_callback(DOMAIN_TO_NETCONN_TYPE(domain, NETCONN_TCP), DEFAULT_SOCKET_EVENTCB);
1709 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%s, SOCK_STREAM, %d) = ",
1710 domain == PF_INET ? "PF_INET" : "UNKNOWN", protocol));
1711 break;
1712 default:
1713 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_socket(%d, %d/UNKNOWN, %d) = -1\n",
1714 domain, type, protocol));
1715 set_errno(EINVAL);
1716 return -1;
1717 }
1718
1719 if (!conn) {
1720 LWIP_DEBUGF(SOCKETS_DEBUG, ("-1 / ENOBUFS (could not create netconn)\n"));
1721 set_errno(ENOBUFS);
1722 return -1;
1723 }
1724
1725 i = alloc_socket(conn, 0);
1726
1727 if (i == -1) {
1728 netconn_delete(conn);
1729 set_errno(ENFILE);
1730 return -1;
1731 }
1732 conn->socket = i;
1733 done_socket(&sockets[i - LWIP_SOCKET_OFFSET]);
1734 LWIP_DEBUGF(SOCKETS_DEBUG, ("%d\n", i));
1735 set_errno(0);
1736 return i;
1737 }
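/* Illustrative usage sketch (not part of lwIP): creating sockets of the
 * supported types. A valid descriptor (>= LWIP_SOCKET_OFFSET) is returned, or
 * -1 with errno set to EINVAL (unknown type), ENOBUFS (no netconn available)
 * or ENFILE (socket table full).
 *
 *   int tcp_sock = lwip_socket(AF_INET, SOCK_STREAM, 0);
 *   int udp_sock = lwip_socket(AF_INET, SOCK_DGRAM, 0);
 *   if ((tcp_sock < 0) || (udp_sock < 0)) {
 *     ... creation failed, inspect errno ...
 *   }
 */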
1738
1739 ssize_t
1740 lwip_write(int s, const void *data, size_t size)
1741 {
1742 return lwip_send(s, data, size, 0);
1743 }
1744
1745 ssize_t
1746 lwip_writev(int s, const struct iovec *iov, int iovcnt)
1747 {
1748 struct msghdr msg;
1749
1750 msg.msg_name = NULL;
1751 msg.msg_namelen = 0;
1752 /* Hack: we have to cast via number to cast from 'const' pointer to non-const.
1753 Blame the opengroup standard for this inconsistency. */
1754 msg.msg_iov = LWIP_CONST_CAST(struct iovec *, iov);
1755 msg.msg_iovlen = iovcnt;
1756 msg.msg_control = NULL;
1757 msg.msg_controllen = 0;
1758 msg.msg_flags = 0;
1759 return lwip_sendmsg(s, &msg, 0);
1760 }
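/* Illustrative usage sketch (not part of lwIP): writing two buffers in one call
 * on a connected socket with lwip_writev(), which simply builds a struct msghdr
 * and forwards to lwip_sendmsg(). 'tcp_sock', 'page_body' and 'page_len' are
 * placeholder assumptions.
 *
 *   struct iovec iov[2];
 *   iov[0].iov_base = "HTTP/1.0 200 OK\r\n\r\n";
 *   iov[0].iov_len  = 19;
 *   iov[1].iov_base = page_body;
 *   iov[1].iov_len  = page_len;
 *   if (lwip_writev(tcp_sock, iov, 2) < 0) {
 *     ... out of memory or connection error, see errno ...
 *   }
 */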
1761
1762 #if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
1763 /* Add select_cb to select_cb_list. */
1764 static void
1765 lwip_link_select_cb(struct lwip_select_cb *select_cb)
1766 {
1767 LWIP_SOCKET_SELECT_DECL_PROTECT(lev);
1768
1769 /* Protect the select_cb_list */
1770 LWIP_SOCKET_SELECT_PROTECT(lev);
1771
1772 /* Put this select_cb on top of list */
1773 select_cb->next = select_cb_list;
1774 if (select_cb_list != NULL) {
1775 select_cb_list->prev = select_cb;
1776 }
1777 select_cb_list = select_cb;
1778 #if !LWIP_TCPIP_CORE_LOCKING
1779 /* Increasing this counter tells select_check_waiters that the list has changed. */
1780 select_cb_ctr++;
1781 #endif
1782
1783 /* Now we can safely unprotect */
1784 LWIP_SOCKET_SELECT_UNPROTECT(lev);
1785 }
1786
1787 /* Remove select_cb from select_cb_list. */
1788 static void
1789 lwip_unlink_select_cb(struct lwip_select_cb *select_cb)
1790 {
1791 LWIP_SOCKET_SELECT_DECL_PROTECT(lev);
1792
1793 /* Take us off the list */
1794 LWIP_SOCKET_SELECT_PROTECT(lev);
1795 if (select_cb->next != NULL) {
1796 select_cb->next->prev = select_cb->prev;
1797 }
1798 if (select_cb_list == select_cb) {
1799 LWIP_ASSERT("select_cb->prev == NULL", select_cb->prev == NULL);
1800 select_cb_list = select_cb->next;
1801 } else {
1802 LWIP_ASSERT("select_cb->prev != NULL", select_cb->prev != NULL);
1803 select_cb->prev->next = select_cb->next;
1804 }
1805 #if !LWIP_TCPIP_CORE_LOCKING
1806 /* Increasing this counter tells select_check_waiters that the list has changed. */
1807 select_cb_ctr++;
1808 #endif
1809 LWIP_SOCKET_SELECT_UNPROTECT(lev);
1810 }
1811 #endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */
1812
1813 #if LWIP_SOCKET_SELECT
1814 /**
1815 * Go through the readset, writeset and exceptset and check which of the sockets
1816 * set in them have events. On return, readset_out, writeset_out and exceptset_out
1817 * contain the sockets that had events.
1818 *
1819 * @param maxfdp1 the highest socket index in the sets
1820 * @param readset_in set of sockets to check for read events
1821 * @param writeset_in set of sockets to check for write events
1822 * @param exceptset_in set of sockets to check for error events
1823 * @param readset_out set of sockets that had read events
1824 * @param writeset_out set of sockets that had write events
1825 * @param exceptset_out set of sockets that had error events
1826 * @return number of sockets that had events (read/write/exception) (>= 0)
1827 */
1828 static int
1829 lwip_selscan(int maxfdp1, fd_set *readset_in, fd_set *writeset_in, fd_set *exceptset_in,
1830 fd_set *readset_out, fd_set *writeset_out, fd_set *exceptset_out)
1831 {
1832 int i, nready = 0;
1833 fd_set lreadset, lwriteset, lexceptset;
1834 struct lwip_sock *sock;
1835 SYS_ARCH_DECL_PROTECT(lev);
1836
1837 FD_ZERO(&lreadset);
1838 FD_ZERO(&lwriteset);
1839 FD_ZERO(&lexceptset);
1840
1841 /* Go through each socket in each list to count number of sockets which
1842 currently match */
1843 for (i = LWIP_SOCKET_OFFSET; i < maxfdp1; i++) {
1844 /* if this FD is not in the set, continue */
1845 if (!(readset_in && FD_ISSET(i, readset_in)) &&
1846 !(writeset_in && FD_ISSET(i, writeset_in)) &&
1847 !(exceptset_in && FD_ISSET(i, exceptset_in))) {
1848 continue;
1849 }
1850 /* First get the socket's status (protected)... */
1851 SYS_ARCH_PROTECT(lev);
1852 sock = tryget_socket_unconn_locked(i);
1853 if (sock != NULL) {
1854 void *lastdata = sock->lastdata.pbuf;
1855 s16_t rcvevent = sock->rcvevent;
1856 u16_t sendevent = sock->sendevent;
1857 u16_t errevent = sock->errevent;
1858 SYS_ARCH_UNPROTECT(lev);
1859
1860 /* ... then examine it: */
1861 /* See if netconn of this socket is ready for read */
1862 if (readset_in && FD_ISSET(i, readset_in) && ((lastdata != NULL) || (rcvevent > 0))) {
1863 FD_SET(i, &lreadset);
1864 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for reading\n", i));
1865 nready++;
1866 }
1867 /* See if netconn of this socket is ready for write */
1868 if (writeset_in && FD_ISSET(i, writeset_in) && (sendevent != 0)) {
1869 FD_SET(i, &lwriteset);
1870 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for writing\n", i));
1871 nready++;
1872 }
1873 /* See if netconn of this socket had an error */
1874 if (exceptset_in && FD_ISSET(i, exceptset_in) && (errevent != 0)) {
1875 FD_SET(i, &lexceptset);
1876 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_selscan: fd=%d ready for exception\n", i));
1877 nready++;
1878 }
1879 done_socket(sock);
1880 } else {
1881 SYS_ARCH_UNPROTECT(lev);
1882 /* not a valid open socket */
1883 return -1;
1884 }
1885 }
1886 /* copy local sets to the ones provided as arguments */
1887 *readset_out = lreadset;
1888 *writeset_out = lwriteset;
1889 *exceptset_out = lexceptset;
1890
1891 LWIP_ASSERT("nready >= 0", nready >= 0);
1892 return nready;
1893 }
1894
1895 #if LWIP_NETCONN_FULLDUPLEX
1896 /* Mark all of the set sockets in one of the three fdsets passed to select as used.
1897 * All sockets are marked (and later unmarked), whether they are open or not.
1898 * This is OK as lwip_selscan aborts select when non-open sockets are found.
1899 */
1900 static void
1901 lwip_select_inc_sockets_used_set(int maxfdp, fd_set *fdset, fd_set *used_sockets)
1902 {
1903 SYS_ARCH_DECL_PROTECT(lev);
1904 if (fdset) {
1905 int i;
1906 for (i = LWIP_SOCKET_OFFSET; i < maxfdp; i++) {
1907 /* if this FD is in the set, lock it (unless already done) */
1908 if (FD_ISSET(i, fdset) && !FD_ISSET(i, used_sockets)) {
1909 struct lwip_sock *sock;
1910 SYS_ARCH_PROTECT(lev);
1911 sock = tryget_socket_unconn_locked(i);
1912 if (sock != NULL) {
1913 /* leave the socket used until released by lwip_select_dec_sockets_used */
1914 FD_SET(i, used_sockets);
1915 }
1916 SYS_ARCH_UNPROTECT(lev);
1917 }
1918 }
1919 }
1920 }
1921
1922 /* Mark all sockets passed to select as used to prevent them from being freed
1923 * from other threads while select is running.
1924 * Marked sockets are added to 'used_sockets' to mark them only once and be able
1925 * to unmark them correctly.
1926 */
1927 static void
1928 lwip_select_inc_sockets_used(int maxfdp, fd_set *fdset1, fd_set *fdset2, fd_set *fdset3, fd_set *used_sockets)
1929 {
1930 FD_ZERO(used_sockets);
1931 lwip_select_inc_sockets_used_set(maxfdp, fdset1, used_sockets);
1932 lwip_select_inc_sockets_used_set(maxfdp, fdset2, used_sockets);
1933 lwip_select_inc_sockets_used_set(maxfdp, fdset3, used_sockets);
1934 }
1935
1936 /* Release all sockets that were marked as used when starting select */
1937 static void
1938 lwip_select_dec_sockets_used(int maxfdp, fd_set *used_sockets)
1939 {
1940 int i;
1941 for (i = LWIP_SOCKET_OFFSET; i < maxfdp; i++) {
1942 /* if this FD is not in the set, continue */
1943 if (FD_ISSET(i, used_sockets)) {
1944 struct lwip_sock *sock = tryget_socket_unconn_nouse(i);
1945 LWIP_ASSERT("socket gone at the end of select", sock != NULL);
1946 if (sock != NULL) {
1947 done_socket(sock);
1948 }
1949 }
1950 }
1951 }
1952 #else /* LWIP_NETCONN_FULLDUPLEX */
1953 #define lwip_select_inc_sockets_used(maxfdp1, readset, writeset, exceptset, used_sockets)
1954 #define lwip_select_dec_sockets_used(maxfdp1, used_sockets)
1955 #endif /* LWIP_NETCONN_FULLDUPLEX */
1956
1957 int
1958 lwip_select(int maxfdp1, fd_set *readset, fd_set *writeset, fd_set *exceptset,
1959 struct timeval *timeout)
1960 {
1961 u32_t waitres = 0;
1962 int nready;
1963 fd_set lreadset, lwriteset, lexceptset;
1964 u32_t msectimeout;
1965 int i;
1966 int maxfdp2;
1967 #if LWIP_NETCONN_SEM_PER_THREAD
1968 int waited = 0;
1969 #endif
1970 #if LWIP_NETCONN_FULLDUPLEX
1971 fd_set used_sockets;
1972 #endif
1973 SYS_ARCH_DECL_PROTECT(lev);
1974
1975 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select(%d, %p, %p, %p, tvsec=%"S32_F" tvusec=%"S32_F")\n",
1976 maxfdp1, (void *)readset, (void *) writeset, (void *) exceptset,
1977 timeout ? (s32_t)timeout->tv_sec : (s32_t) - 1,
1978 timeout ? (s32_t)timeout->tv_usec : (s32_t) - 1));
1979
1980 if ((maxfdp1 < 0) || (maxfdp1 > LWIP_SELECT_MAXNFDS)) {
1981 set_errno(EINVAL);
1982 return -1;
1983 }
1984
1985 lwip_select_inc_sockets_used(maxfdp1, readset, writeset, exceptset, &used_sockets);
1986
1987 /* Go through each socket in each list to count number of sockets which
1988 currently match */
1989 nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset);
1990
1991 if (nready < 0) {
1992 /* one of the sockets in one of the fd_sets was invalid */
1993 set_errno(EBADF);
1994 lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
1995 return -1;
1996 } else if (nready > 0) {
1997 /* one or more sockets are set, no need to wait */
1998 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: nready=%d\n", nready));
1999 } else {
2000 /* If we don't have any current events, then suspend if we are supposed to */
2001 if (timeout && timeout->tv_sec == 0 && timeout->tv_usec == 0) {
2002 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: no timeout, returning 0\n"));
2003 /* This is OK as the local fdsets are empty and nready is zero,
2004 or we would have returned earlier. */
2005 } else {
2006 /* None ready: add our semaphore to list:
2007 We don't actually need any dynamic memory. Our entry on the
2008 list is only valid while we are in this function, so it's ok
2009 to use local variables (unless we're running in MPU compatible
2010 mode). */
2011 API_SELECT_CB_VAR_DECLARE(select_cb);
2012 API_SELECT_CB_VAR_ALLOC(select_cb, set_errno(ENOMEM); lwip_select_dec_sockets_used(maxfdp1, &used_sockets); return -1);
2013 memset(&API_SELECT_CB_VAR_REF(select_cb), 0, sizeof(struct lwip_select_cb));
2014
2015 API_SELECT_CB_VAR_REF(select_cb).readset = readset;
2016 API_SELECT_CB_VAR_REF(select_cb).writeset = writeset;
2017 API_SELECT_CB_VAR_REF(select_cb).exceptset = exceptset;
2018 #if LWIP_NETCONN_SEM_PER_THREAD
2019 API_SELECT_CB_VAR_REF(select_cb).sem = LWIP_NETCONN_THREAD_SEM_GET();
2020 #else /* LWIP_NETCONN_SEM_PER_THREAD */
2021 if (sys_sem_new(&API_SELECT_CB_VAR_REF(select_cb).sem, 0) != ERR_OK) {
2022 /* failed to create semaphore */
2023 set_errno(ENOMEM);
2024 lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
2025 API_SELECT_CB_VAR_FREE(select_cb);
2026 return -1;
2027 }
2028 #endif /* LWIP_NETCONN_SEM_PER_THREAD */
2029
2030 lwip_link_select_cb(&API_SELECT_CB_VAR_REF(select_cb));
2031
2032 /* Increase select_waiting for each socket we are interested in */
2033 maxfdp2 = maxfdp1;
2034 for (i = LWIP_SOCKET_OFFSET; i < maxfdp1; i++) {
2035 if ((readset && FD_ISSET(i, readset)) ||
2036 (writeset && FD_ISSET(i, writeset)) ||
2037 (exceptset && FD_ISSET(i, exceptset))) {
2038 struct lwip_sock *sock;
2039 SYS_ARCH_PROTECT(lev);
2040 sock = tryget_socket_unconn_locked(i);
2041 if (sock != NULL) {
2042 sock->select_waiting++;
2043 if (sock->select_waiting == 0) {
2044 /* overflow - too many threads waiting */
2045 sock->select_waiting--;
2046 nready = -1;
2047 maxfdp2 = i;
2048 SYS_ARCH_UNPROTECT(lev);
2049 done_socket(sock);
2050 set_errno(EBUSY);
2051 break;
2052 }
2053 SYS_ARCH_UNPROTECT(lev);
2054 done_socket(sock);
2055 } else {
2056 /* Not a valid socket */
2057 nready = -1;
2058 maxfdp2 = i;
2059 SYS_ARCH_UNPROTECT(lev);
2060 set_errno(EBADF);
2061 break;
2062 }
2063 }
2064 }
2065
2066 if (nready >= 0) {
2067 /* Call lwip_selscan again: there could have been events between
2068 the last scan (without us on the list) and putting us on the list! */
2069 nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset);
2070 if (!nready) {
2071 /* Still none ready, just wait to be woken */
2072 if (timeout == 0) {
2073 /* Wait forever */
2074 msectimeout = 0;
2075 } else {
2076 long msecs_long = ((timeout->tv_sec * 1000) + ((timeout->tv_usec + 500) / 1000));
2077 if (msecs_long <= 0) {
2078 /* Wait 1ms at least (0 means wait forever) */
2079 msectimeout = 1;
2080 } else {
2081 msectimeout = (u32_t)msecs_long;
2082 }
2083 }
2084
2085 waitres = sys_arch_sem_wait(SELECT_SEM_PTR(API_SELECT_CB_VAR_REF(select_cb).sem), msectimeout);
2086 #if LWIP_NETCONN_SEM_PER_THREAD
2087 waited = 1;
2088 #endif
2089 }
2090 }
2091
2092 /* Decrease select_waiting for each socket we are interested in */
2093 for (i = LWIP_SOCKET_OFFSET; i < maxfdp2; i++) {
2094 if ((readset && FD_ISSET(i, readset)) ||
2095 (writeset && FD_ISSET(i, writeset)) ||
2096 (exceptset && FD_ISSET(i, exceptset))) {
2097 struct lwip_sock *sock;
2098 SYS_ARCH_PROTECT(lev);
2099 sock = tryget_socket_unconn_locked(i);
2100 if (sock != NULL) {
2101 /* for now, handle select_waiting==0... */
2102 LWIP_ASSERT("sock->select_waiting > 0", sock->select_waiting > 0);
2103 if (sock->select_waiting > 0) {
2104 sock->select_waiting--;
2105 }
2106 SYS_ARCH_UNPROTECT(lev);
2107 done_socket(sock);
2108 } else {
2109 SYS_ARCH_UNPROTECT(lev);
2110 /* Not a valid socket */
2111 nready = -1;
2112 set_errno(EBADF);
2113 }
2114 }
2115 }
2116
2117 lwip_unlink_select_cb(&API_SELECT_CB_VAR_REF(select_cb));
2118
2119 #if LWIP_NETCONN_SEM_PER_THREAD
2120 if (API_SELECT_CB_VAR_REF(select_cb).sem_signalled && (!waited || (waitres == SYS_ARCH_TIMEOUT))) {
2121 /* don't leave the thread-local semaphore signalled */
2122 sys_arch_sem_wait(API_SELECT_CB_VAR_REF(select_cb).sem, 1);
2123 }
2124 #else /* LWIP_NETCONN_SEM_PER_THREAD */
2125 sys_sem_free(&API_SELECT_CB_VAR_REF(select_cb).sem);
2126 #endif /* LWIP_NETCONN_SEM_PER_THREAD */
2127 API_SELECT_CB_VAR_FREE(select_cb);
2128
2129 if (nready < 0) {
2130 /* This happens when a socket got closed while waiting */
2131 lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
2132 return -1;
2133 }
2134
2135 if (waitres == SYS_ARCH_TIMEOUT) {
2136 /* Timeout */
2137 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: timeout expired\n"));
2138 /* This is OK as the local fdsets are empty and nready is zero,
2139 or we would have returned earlier. */
2140 } else {
2141 /* See what's set now after waiting */
2142 nready = lwip_selscan(maxfdp1, readset, writeset, exceptset, &lreadset, &lwriteset, &lexceptset);
2143 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: nready=%d\n", nready));
2144 }
2145 }
2146 }
2147
2148 lwip_select_dec_sockets_used(maxfdp1, &used_sockets);
2149 set_errno(0);
2150 if (readset) {
2151 *readset = lreadset;
2152 }
2153 if (writeset) {
2154 *writeset = lwriteset;
2155 }
2156 if (exceptset) {
2157 *exceptset = lexceptset;
2158 }
2159 return nready;
2160 }
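/* Illustrative usage sketch (not part of lwIP): waiting up to one second for a
 * socket to become readable with lwip_select(). 'sock' is a placeholder
 * descriptor; the first argument must be the highest descriptor in any of the
 * sets plus one.
 *
 *   fd_set rfds;
 *   struct timeval tv;
 *   int ret;
 *   FD_ZERO(&rfds);
 *   FD_SET(sock, &rfds);
 *   tv.tv_sec = 1;
 *   tv.tv_usec = 0;
 *   ret = lwip_select(sock + 1, &rfds, NULL, NULL, &tv);
 *   if (ret > 0 && FD_ISSET(sock, &rfds)) {
 *     ... a recv/accept will not block now ...
 *   } else if (ret == 0) {
 *     ... timeout expired ...
 *   } else {
 *     ... error, e.g. EBADF for an invalid descriptor ...
 *   }
 */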
2161 #endif /* LWIP_SOCKET_SELECT */
2162
2163 #if LWIP_SOCKET_POLL
2164 /** Options for the lwip_pollscan function. */
2165 enum lwip_pollscan_opts
2166 {
2167 /** Clear revents in each struct pollfd. */
2168 LWIP_POLLSCAN_CLEAR = 1,
2169
2170 /** Increment select_waiting in each struct lwip_sock. */
2171 LWIP_POLLSCAN_INC_WAIT = 2,
2172
2173 /** Decrement select_waiting in each struct lwip_sock. */
2174 LWIP_POLLSCAN_DEC_WAIT = 4
2175 };
2176
2177 /**
2178 * Update revents in each struct pollfd.
2179 * Optionally update select_waiting in struct lwip_sock.
2180 *
2181 * @param fds array of structures to update
2182 * @param nfds number of structures in fds
2183 * @param opts what to update and how
2184 * @return number of structures that have revents != 0
2185 */
2186 static int
2187 lwip_pollscan(struct pollfd *fds, nfds_t nfds, enum lwip_pollscan_opts opts)
2188 {
2189 int nready = 0;
2190 nfds_t fdi;
2191 struct lwip_sock *sock;
2192 SYS_ARCH_DECL_PROTECT(lev);
2193
2194 /* Go through each struct pollfd in the array. */
2195 for (fdi = 0; fdi < nfds; fdi++) {
2196 if ((opts & LWIP_POLLSCAN_CLEAR) != 0) {
2197 fds[fdi].revents = 0;
2198 }
2199
2200 /* Negative fd means the caller wants us to ignore this struct.
2201 POLLNVAL means we already detected that the fd is invalid;
2202 if another thread has since opened a new socket with that fd,
2203 we must not use that socket. */
2204 if (fds[fdi].fd >= 0 && (fds[fdi].revents & POLLNVAL) == 0) {
2205 /* First get the socket's status (protected)... */
2206 SYS_ARCH_PROTECT(lev);
2207 sock = tryget_socket_unconn_locked(fds[fdi].fd);
2208 if (sock != NULL) {
2209 void* lastdata = sock->lastdata.pbuf;
2210 s16_t rcvevent = sock->rcvevent;
2211 u16_t sendevent = sock->sendevent;
2212 u16_t errevent = sock->errevent;
2213
2214 if ((opts & LWIP_POLLSCAN_INC_WAIT) != 0) {
2215 sock->select_waiting++;
2216 if (sock->select_waiting == 0) {
2217 /* overflow - too many threads waiting */
2218 sock->select_waiting--;
2219 nready = -1;
2220 SYS_ARCH_UNPROTECT(lev);
2221 done_socket(sock);
2222 break;
2223 }
2224 } else if ((opts & LWIP_POLLSCAN_DEC_WAIT) != 0) {
2225 /* for now, handle select_waiting==0... */
2226 LWIP_ASSERT("sock->select_waiting > 0", sock->select_waiting > 0);
2227 if (sock->select_waiting > 0) {
2228 sock->select_waiting--;
2229 }
2230 }
2231 SYS_ARCH_UNPROTECT(lev);
2232 done_socket(sock);
2233
2234 /* ... then examine it: */
2235 /* See if netconn of this socket is ready for read */
2236 if ((fds[fdi].events & POLLIN) != 0 && ((lastdata != NULL) || (rcvevent > 0))) {
2237 fds[fdi].revents |= POLLIN;
2238 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for reading\n", fds[fdi].fd));
2239 }
2240 /* See if netconn of this socket is ready for write */
2241 if ((fds[fdi].events & POLLOUT) != 0 && (sendevent != 0)) {
2242 fds[fdi].revents |= POLLOUT;
2243 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for writing\n", fds[fdi].fd));
2244 }
2245 /* See if netconn of this socket had an error */
2246 if (errevent != 0) {
2247 /* POLLERR is output only. */
2248 fds[fdi].revents |= POLLERR;
2249 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_pollscan: fd=%d ready for exception\n", fds[fdi].fd));
2250 }
2251 } else {
2252 /* Not a valid socket */
2253 SYS_ARCH_UNPROTECT(lev);
2254 /* POLLNVAL is output only. */
2255 fds[fdi].revents |= POLLNVAL;
2256 return -1;
2257 }
2258 }
2259
2260 /* Will return the number of structures that have events,
2261 not the number of events. */
2262 if (fds[fdi].revents != 0) {
2263 nready++;
2264 }
2265 }
2266
2267 LWIP_ASSERT("nready >= 0", nready >= 0);
2268 return nready;
2269 }
2270
2271 #if LWIP_NETCONN_FULLDUPLEX
2272 /* Mark all sockets as used.
2273 *
2274 * All sockets are marked (and later unmarked), whether they are open or not.
2275 * This is OK as lwip_pollscan aborts the poll when non-open sockets are found.
2276 */
2277 static void
2278 lwip_poll_inc_sockets_used(struct pollfd *fds, nfds_t nfds)
2279 {
2280 nfds_t fdi;
2281
2282 if(fds) {
2283 /* Go through each struct pollfd in the array. */
2284 for (fdi = 0; fdi < nfds; fdi++) {
2285 /* Increase the reference counter */
2286 tryget_socket_unconn(fds[fdi].fd);
2287 }
2288 }
2289 }
2290
2291 /* Release all sockets that were marked as used when starting poll */
2292 static void
2293 lwip_poll_dec_sockets_used(struct pollfd *fds, nfds_t nfds)
2294 {
2295 nfds_t fdi;
2296
2297 if(fds) {
2298 /* Go through each struct pollfd in the array. */
2299 for (fdi = 0; fdi < nfds; fdi++) {
2300 struct lwip_sock *sock = tryget_socket_unconn_nouse(fds[fdi].fd);
2301 if (sock != NULL) {
2302 done_socket(sock);
2303 }
2304 }
2305 }
2306 }
2307 #else /* LWIP_NETCONN_FULLDUPLEX */
2308 #define lwip_poll_inc_sockets_used(fds, nfds)
2309 #define lwip_poll_dec_sockets_used(fds, nfds)
2310 #endif /* LWIP_NETCONN_FULLDUPLEX */
2311
2312 int
2313 lwip_poll(struct pollfd *fds, nfds_t nfds, int timeout)
2314 {
2315 u32_t waitres = 0;
2316 int nready;
2317 u32_t msectimeout;
2318 #if LWIP_NETCONN_SEM_PER_THREAD
2319 int waited = 0;
2320 #endif
2321
2322 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll(%p, %d, %d)\n",
2323 (void*)fds, (int)nfds, timeout));
2324 LWIP_ERROR("lwip_poll: invalid fds", ((fds != NULL && nfds > 0) || (fds == NULL && nfds == 0)),
2325 set_errno(EINVAL); return -1;);
2326
2327 lwip_poll_inc_sockets_used(fds, nfds);
2328
2329 /* Go through each struct pollfd to count number of structures
2330 which currently match */
2331 nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_CLEAR);
2332
2333 if (nready < 0) {
2334 lwip_poll_dec_sockets_used(fds, nfds);
2335 return -1;
2336 }
2337
2338 /* If we don't have any current events, then suspend if we are supposed to */
2339 if (!nready) {
2340 API_SELECT_CB_VAR_DECLARE(select_cb);
2341
2342 if (timeout == 0) {
2343 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: no timeout, returning 0\n"));
2344 goto return_success;
2345 }
2346 API_SELECT_CB_VAR_ALLOC(select_cb, set_errno(EAGAIN); lwip_poll_dec_sockets_used(fds, nfds); return -1);
2347 memset(&API_SELECT_CB_VAR_REF(select_cb), 0, sizeof(struct lwip_select_cb));
2348
2349 /* None ready: add our semaphore to list:
2350 We don't actually need any dynamic memory. Our entry on the
2351 list is only valid while we are in this function, so it's ok
2352 to use local variables. */
2353
2354 API_SELECT_CB_VAR_REF(select_cb).poll_fds = fds;
2355 API_SELECT_CB_VAR_REF(select_cb).poll_nfds = nfds;
2356 #if LWIP_NETCONN_SEM_PER_THREAD
2357 API_SELECT_CB_VAR_REF(select_cb).sem = LWIP_NETCONN_THREAD_SEM_GET();
2358 #else /* LWIP_NETCONN_SEM_PER_THREAD */
2359 if (sys_sem_new(&API_SELECT_CB_VAR_REF(select_cb).sem, 0) != ERR_OK) {
2360 /* failed to create semaphore */
2361 set_errno(EAGAIN);
2362 lwip_poll_dec_sockets_used(fds, nfds);
2363 API_SELECT_CB_VAR_FREE(select_cb);
2364 return -1;
2365 }
2366 #endif /* LWIP_NETCONN_SEM_PER_THREAD */
2367
2368 lwip_link_select_cb(&API_SELECT_CB_VAR_REF(select_cb));
2369
2370 /* Increase select_waiting for each socket we are interested in.
2371 Also, check for events again: there could have been events between
2372 the last scan (without us on the list) and putting us on the list! */
2373 nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_INC_WAIT);
2374
2375 if (!nready) {
2376 /* Still none ready, just wait to be woken */
2377 if (timeout < 0) {
2378 /* Wait forever */
2379 msectimeout = 0;
2380 } else {
2381 /* timeout == 0 would have been handled earlier. */
2382 LWIP_ASSERT("timeout > 0", timeout > 0);
2383 msectimeout = timeout;
2384 }
2385 waitres = sys_arch_sem_wait(SELECT_SEM_PTR(API_SELECT_CB_VAR_REF(select_cb).sem), msectimeout);
2386 #if LWIP_NETCONN_SEM_PER_THREAD
2387 waited = 1;
2388 #endif
2389 }
2390
2391 /* Decrease select_waiting for each socket we are interested in,
2392 and check which events occurred while we waited. */
2393 nready = lwip_pollscan(fds, nfds, LWIP_POLLSCAN_DEC_WAIT);
2394
2395 lwip_unlink_select_cb(&API_SELECT_CB_VAR_REF(select_cb));
2396
2397 #if LWIP_NETCONN_SEM_PER_THREAD
2398 if (API_SELECT_CB_VAR_REF(select_cb).sem_signalled && (!waited || (waitres == SYS_ARCH_TIMEOUT))) {
2399 /* don't leave the thread-local semaphore signalled */
2400 sys_arch_sem_wait(API_SELECT_CB_VAR_REF(select_cb).sem, 1);
2401 }
2402 #else /* LWIP_NETCONN_SEM_PER_THREAD */
2403 sys_sem_free(&API_SELECT_CB_VAR_REF(select_cb).sem);
2404 #endif /* LWIP_NETCONN_SEM_PER_THREAD */
2405 API_SELECT_CB_VAR_FREE(select_cb);
2406
2407 if (nready < 0) {
2408 /* This happens when a socket got closed while waiting */
2409 lwip_poll_dec_sockets_used(fds, nfds);
2410 return -1;
2411 }
2412
2413 if (waitres == SYS_ARCH_TIMEOUT) {
2414 /* Timeout */
2415 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: timeout expired\n"));
2416 goto return_success;
2417 }
2418 }
2419
2420 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_poll: nready=%d\n", nready));
2421 return_success:
2422 lwip_poll_dec_sockets_used(fds, nfds);
2423 set_errno(0);
2424 return nready;
2425 }
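/* Illustrative usage sketch (not part of lwIP): the equivalent wait using
 * lwip_poll(). 'sock' is a placeholder descriptor; a negative timeout waits
 * forever. Error conditions are reported via POLLERR (and invalid descriptors
 * via POLLNVAL) even when not requested in 'events'.
 *
 *   struct pollfd pfd;
 *   int ret;
 *   pfd.fd = sock;
 *   pfd.events = POLLIN;
 *   ret = lwip_poll(&pfd, 1, 1000);
 *   if (ret > 0 && (pfd.revents & POLLIN)) {
 *     ... readable without blocking ...
 *   } else if (ret == 0) {
 *     ... 1000 ms timeout expired ...
 *   }
 */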
2426
2427 /**
2428 * Check whether event_callback should wake up a thread waiting in
2429 * lwip_poll.
2430 */
2431 static int
2432 lwip_poll_should_wake(const struct lwip_select_cb *scb, int fd, int has_recvevent, int has_sendevent, int has_errevent)
2433 {
2434 nfds_t fdi;
2435 for (fdi = 0; fdi < scb->poll_nfds; fdi++) {
2436 const struct pollfd *pollfd = &scb->poll_fds[fdi];
2437 if (pollfd->fd == fd) {
2438 /* Do not update pollfd->revents right here;
2439 that would be a data race because lwip_pollscan
2440 accesses revents without protecting. */
2441 if (has_recvevent && (pollfd->events & POLLIN) != 0) {
2442 return 1;
2443 }
2444 if (has_sendevent && (pollfd->events & POLLOUT) != 0) {
2445 return 1;
2446 }
2447 if (has_errevent) {
2448 /* POLLERR is output only. */
2449 return 1;
2450 }
2451 }
2452 }
2453 return 0;
2454 }
2455 #endif /* LWIP_SOCKET_POLL */
2456
2457 #if LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL
2458 /**
2459 * Callback registered in the netconn layer for each socket-netconn.
2460 * Processes recvevent (data available) and wakes up tasks waiting for select.
2461 *
2462 * @note for LWIP_TCPIP_CORE_LOCKING any caller of this function
2463 * must have the core lock held when signaling the following events
2464 * as they might cause select_list_cb to be checked:
2465 * NETCONN_EVT_RCVPLUS
2466 * NETCONN_EVT_SENDPLUS
2467 * NETCONN_EVT_ERROR
2468 * This requirement will be asserted in select_check_waiters()
2469 */
2470 static void
2471 event_callback(struct netconn *conn, enum netconn_evt evt, u16_t len)
2472 {
2473 int s, check_waiters;
2474 struct lwip_sock *sock;
2475 SYS_ARCH_DECL_PROTECT(lev);
2476
2477 LWIP_UNUSED_ARG(len);
2478
2479 /* Get socket */
2480 if (conn) {
2481 s = conn->socket;
2482 if (s < 0) {
2483 /* Data comes in right away after an accept, even though
2484 * the server task might not have created a new socket yet.
2485 * Just count down (or up) if that's the case and we
2486 * will use the data later. Note that only receive events
2487 * can happen before the new socket is set up. */
2488 SYS_ARCH_PROTECT(lev);
2489 if (conn->socket < 0) {
2490 if (evt == NETCONN_EVT_RCVPLUS) {
2491 /* conn->socket is -1 on initialization
2492 lwip_accept adjusts sock->recvevent if conn->socket < -1 */
2493 conn->socket--;
2494 }
2495 SYS_ARCH_UNPROTECT(lev);
2496 return;
2497 }
2498 s = conn->socket;
2499 SYS_ARCH_UNPROTECT(lev);
2500 }
2501
2502 sock = get_socket(s);
2503 if (!sock) {
2504 return;
2505 }
2506 } else {
2507 return;
2508 }
2509
2510 check_waiters = 1;
2511 SYS_ARCH_PROTECT(lev);
2512 /* Set event as required */
2513 switch (evt) {
2514 case NETCONN_EVT_RCVPLUS:
2515 sock->rcvevent++;
2516 if (sock->rcvevent > 1) {
2517 check_waiters = 0;
2518 }
2519 break;
2520 case NETCONN_EVT_RCVMINUS:
2521 sock->rcvevent--;
2522 check_waiters = 0;
2523 break;
2524 case NETCONN_EVT_SENDPLUS:
2525 if (sock->sendevent) {
2526 check_waiters = 0;
2527 }
2528 sock->sendevent = 1;
2529 break;
2530 case NETCONN_EVT_SENDMINUS:
2531 sock->sendevent = 0;
2532 check_waiters = 0;
2533 break;
2534 case NETCONN_EVT_ERROR:
2535 sock->errevent = 1;
2536 break;
2537 default:
2538 LWIP_ASSERT("unknown event", 0);
2539 break;
2540 }
2541
2542 if (sock->select_waiting && check_waiters) {
2543 /* Save which events are active */
2544 int has_recvevent, has_sendevent, has_errevent;
2545 has_recvevent = sock->rcvevent > 0;
2546 has_sendevent = sock->sendevent != 0;
2547 has_errevent = sock->errevent != 0;
2548 SYS_ARCH_UNPROTECT(lev);
2549 /* Check any select calls waiting on this socket */
2550 select_check_waiters(s, has_recvevent, has_sendevent, has_errevent);
2551 } else {
2552 SYS_ARCH_UNPROTECT(lev);
2553 }
2554 done_socket(sock);
2555 }
2556
2557 /**
2558 * Check if any select waiters are waiting on this socket and its events
2559 *
2560 * @note on synchronization of select_cb_list:
2561 * LWIP_TCPIP_CORE_LOCKING: the select_cb_list must only be accessed while holding
2562 * the core lock. We do a single pass through the list and signal any waiters.
2563 * Core lock should already be held when calling here!!!!
2564
2565 * !LWIP_TCPIP_CORE_LOCKING: we use SYS_ARCH_PROTECT but unlock on each iteration
2566 * of the loop, thus creating a possibility where a thread could modify the
2567 * select_cb_list during our UNPROTECT/PROTECT. We use a generational counter to
2568 * detect this change and restart the list walk. The list is expected to be small
2569 */
2570 static void select_check_waiters(int s, int has_recvevent, int has_sendevent, int has_errevent)
2571 {
2572 struct lwip_select_cb *scb;
2573 #if !LWIP_TCPIP_CORE_LOCKING
2574 int last_select_cb_ctr;
2575 SYS_ARCH_DECL_PROTECT(lev);
2576 #endif /* !LWIP_TCPIP_CORE_LOCKING */
2577
2578 LWIP_ASSERT_CORE_LOCKED();
2579
2580 #if !LWIP_TCPIP_CORE_LOCKING
2581 SYS_ARCH_PROTECT(lev);
2582 again:
2583 /* remember the state of select_cb_list to detect changes */
2584 last_select_cb_ctr = select_cb_ctr;
2585 #endif /* !LWIP_TCPIP_CORE_LOCKING */
2586 for (scb = select_cb_list; scb != NULL; scb = scb->next) {
2587 if (scb->sem_signalled == 0) {
2588 /* semaphore not signalled yet */
2589 int do_signal = 0;
2590 #if LWIP_SOCKET_POLL
2591 if (scb->poll_fds != NULL) {
2592 do_signal = lwip_poll_should_wake(scb, s, has_recvevent, has_sendevent, has_errevent);
2593 }
2594 #endif /* LWIP_SOCKET_POLL */
2595 #if LWIP_SOCKET_SELECT && LWIP_SOCKET_POLL
2596 else
2597 #endif /* LWIP_SOCKET_SELECT && LWIP_SOCKET_POLL */
2598 #if LWIP_SOCKET_SELECT
2599 {
2600 /* Test this select call for our socket */
2601 if (has_recvevent) {
2602 if (scb->readset && FD_ISSET(s, scb->readset)) {
2603 do_signal = 1;
2604 }
2605 }
2606 if (has_sendevent) {
2607 if (!do_signal && scb->writeset && FD_ISSET(s, scb->writeset)) {
2608 do_signal = 1;
2609 }
2610 }
2611 if (has_errevent) {
2612 if (!do_signal && scb->exceptset && FD_ISSET(s, scb->exceptset)) {
2613 do_signal = 1;
2614 }
2615 }
2616 }
2617 #endif /* LWIP_SOCKET_SELECT */
2618 if (do_signal) {
2619 scb->sem_signalled = 1;
2620 /* For !LWIP_TCPIP_CORE_LOCKING, we don't call SYS_ARCH_UNPROTECT() before signaling
2621 the semaphore, as this might lead to the select thread taking itself off the list,
2622 invalidating the semaphore. */
2623 sys_sem_signal(SELECT_SEM_PTR(scb->sem));
2624 }
2625 }
2626 #if LWIP_TCPIP_CORE_LOCKING
2627 }
2628 #else
2629 /* unlock interrupts with each step */
2630 SYS_ARCH_UNPROTECT(lev);
2631 /* this makes sure interrupt protection time is short */
2632 SYS_ARCH_PROTECT(lev);
2633 if (last_select_cb_ctr != select_cb_ctr) {
2634 /* someone has changed select_cb_list, restart at the beginning */
2635 goto again;
2636 }
2637 /* remember the state of select_cb_list to detect changes */
2638 last_select_cb_ctr = select_cb_ctr;
2639 }
2640 SYS_ARCH_UNPROTECT(lev);
2641 #endif
2642 }
2643 #endif /* LWIP_SOCKET_SELECT || LWIP_SOCKET_POLL */
2644
2645 /**
2646 * Close one end of a full-duplex connection.
2647 */
2648 int
2649 lwip_shutdown(int s, int how)
2650 {
2651 struct lwip_sock *sock;
2652 err_t err;
2653 u8_t shut_rx = 0, shut_tx = 0;
2654
2655 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_shutdown(%d, how=%d)\n", s, how));
2656
2657 sock = get_socket(s);
2658 if (!sock) {
2659 return -1;
2660 }
2661
2662 if (sock->conn != NULL) {
2663 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
2664 set_errno(EOPNOTSUPP);
2665 done_socket(sock);
2666 return -1;
2667 }
2668 } else {
2669 set_errno(ENOTCONN);
2670 done_socket(sock);
2671 return -1;
2672 }
2673
2674 if (how == SHUT_RD) {
2675 shut_rx = 1;
2676 } else if (how == SHUT_WR) {
2677 shut_tx = 1;
2678 } else if (how == SHUT_RDWR) {
2679 shut_rx = 1;
2680 shut_tx = 1;
2681 } else {
2682 set_errno(EINVAL);
2683 done_socket(sock);
2684 return -1;
2685 }
2686 err = netconn_shutdown(sock->conn, shut_rx, shut_tx);
2687
2688 set_errno(err_to_errno(err));
2689 done_socket(sock);
2690 return (err == ERR_OK ? 0 : -1);
2691 }
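/* Illustrative usage sketch (not part of lwIP): half-closing a TCP connection.
 * Only TCP netconns are supported here; other socket types fail with
 * EOPNOTSUPP. 'tcp_sock' is a placeholder for a connected stream socket.
 *
 *   if (lwip_shutdown(tcp_sock, SHUT_WR) == 0) {
 *     ... FIN has been queued, but the socket can still be read from
 *         until the peer closes its side ...
 *   }
 */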
2692
2693 static int
2694 lwip_getaddrname(int s, struct sockaddr *name, socklen_t *namelen, u8_t local)
2695 {
2696 struct lwip_sock *sock;
2697 union sockaddr_aligned saddr;
2698 ip_addr_t naddr;
2699 u16_t port;
2700 err_t err;
2701
2702 sock = get_socket(s);
2703 if (!sock) {
2704 return -1;
2705 }
2706
2707 /* get the IP address and port */
2708 err = netconn_getaddr(sock->conn, &naddr, &port, local);
2709 if (err != ERR_OK) {
2710 set_errno(err_to_errno(err));
2711 done_socket(sock);
2712 return -1;
2713 }
2714
2715 #if LWIP_IPV4 && LWIP_IPV6
2716 /* Dual-stack: Map IPv4 addresses to IPv4 mapped IPv6 */
2717 if (NETCONNTYPE_ISIPV6(netconn_type(sock->conn)) &&
2718 IP_IS_V4_VAL(naddr)) {
2719 ip4_2_ipv4_mapped_ipv6(ip_2_ip6(&naddr), ip_2_ip4(&naddr));
2720 IP_SET_TYPE_VAL(naddr, IPADDR_TYPE_V6);
2721 }
2722 #endif /* LWIP_IPV4 && LWIP_IPV6 */
2723
2724 IPADDR_PORT_TO_SOCKADDR(&saddr, &naddr, port);
2725
2726 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getaddrname(%d, addr=", s));
2727 ip_addr_debug_print_val(SOCKETS_DEBUG, naddr);
2728 LWIP_DEBUGF(SOCKETS_DEBUG, (" port=%"U16_F")\n", port));
2729
2730 if (*namelen > saddr.sa.sa_len) {
2731 *namelen = saddr.sa.sa_len;
2732 }
2733 MEMCPY(name, &saddr, *namelen);
2734
2735 set_errno(0);
2736 done_socket(sock);
2737 return 0;
2738 }
2739
2740 int
2741 lwip_getpeername(int s, struct sockaddr *name, socklen_t *namelen)
2742 {
2743 return lwip_getaddrname(s, name, namelen, 0);
2744 }
2745
2746 int
2747 lwip_getsockname(int s, struct sockaddr *name, socklen_t *namelen)
2748 {
2749 return lwip_getaddrname(s, name, namelen, 1);
2750 }
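/* Illustrative usage sketch (not part of lwIP): querying the local binding of a
 * socket; the same pattern with lwip_getpeername() yields the remote address.
 * Assumes LWIP_IPV4; 'sock' is a placeholder descriptor.
 *
 *   struct sockaddr_storage ss;
 *   socklen_t len = sizeof(ss);
 *   if (lwip_getsockname(sock, (struct sockaddr *)&ss, &len) == 0) {
 *     const struct sockaddr_in *sin = (const struct sockaddr_in *)&ss;
 *     u16_t local_port = lwip_ntohs(sin->sin_port);
 *     ... use local_port and sin->sin_addr ...
 *   }
 */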
2751
2752 int
2753 lwip_getsockopt(int s, int level, int optname, void *optval, socklen_t *optlen)
2754 {
2755 int err;
2756 struct lwip_sock *sock = get_socket(s);
2757 #if !LWIP_TCPIP_CORE_LOCKING
2758 err_t cberr;
2759 LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(data);
2760 #endif /* !LWIP_TCPIP_CORE_LOCKING */
2761
2762 if (!sock) {
2763 return -1;
2764 }
2765
2766 if ((NULL == optval) || (NULL == optlen)) {
2767 set_errno(EFAULT);
2768 done_socket(sock);
2769 return -1;
2770 }
2771
2772 #if LWIP_TCPIP_CORE_LOCKING
2773 /* core-locking can just call the -impl function */
2774 LOCK_TCPIP_CORE();
2775 err = lwip_getsockopt_impl(s, level, optname, optval, optlen);
2776 UNLOCK_TCPIP_CORE();
2777
2778 #else /* LWIP_TCPIP_CORE_LOCKING */
2779
2780 #if LWIP_MPU_COMPATIBLE
2781 /* MPU_COMPATIBLE copies the optval data, so check for max size here */
2782 if (*optlen > LWIP_SETGETSOCKOPT_MAXOPTLEN) {
2783 set_errno(ENOBUFS);
2784 done_socket(sock);
2785 return -1;
2786 }
2787 #endif /* LWIP_MPU_COMPATIBLE */
2788
2789 LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(data, sock);
2790 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).s = s;
2791 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).level = level;
2792 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optname = optname;
2793 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen = *optlen;
2794 #if !LWIP_MPU_COMPATIBLE
2795 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval.p = optval;
2796 #endif /* !LWIP_MPU_COMPATIBLE */
2797 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err = 0;
2798 #if LWIP_NETCONN_SEM_PER_THREAD
2799 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = LWIP_NETCONN_THREAD_SEM_GET();
2800 #else
2801 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = &sock->conn->op_completed;
2802 #endif
2803 cberr = tcpip_callback(lwip_getsockopt_callback, &LWIP_SETGETSOCKOPT_DATA_VAR_REF(data));
2804 if (cberr != ERR_OK) {
2805 LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data);
2806 set_errno(err_to_errno(cberr));
2807 done_socket(sock);
2808 return -1;
2809 }
2810 sys_arch_sem_wait((sys_sem_t *)(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem), 0);
2811
2812 /* write back optlen and optval */
2813 *optlen = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen;
2814 #if LWIP_MPU_COMPATIBLE
2815 MEMCPY(optval, LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval,
2816 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen);
2817 #endif /* LWIP_MPU_COMPATIBLE */
2818
2819 /* maybe lwip_getsockopt_impl has changed err */
2820 err = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err;
2821 LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data);
2822 #endif /* LWIP_TCPIP_CORE_LOCKING */
2823
2824 set_errno(err);
2825 done_socket(sock);
2826 return err ? -1 : 0;
2827 }
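/* Illustrative usage sketch (not part of lwIP): reading a socket's pending
 * error with SO_ERROR, e.g. after a non-blocking connect has completed. The
 * value is already converted via err_to_errno(); 'sock' is a placeholder
 * descriptor.
 *
 *   int so_err = 0;
 *   socklen_t len = sizeof(so_err);
 *   if (lwip_getsockopt(sock, SOL_SOCKET, SO_ERROR, &so_err, &len) == 0) {
 *     if (so_err != 0) {
 *       ... the previous operation failed with errno value so_err ...
 *     }
 *   }
 */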
2828
2829 #if !LWIP_TCPIP_CORE_LOCKING
2830 /** lwip_getsockopt_callback: only used without CORE_LOCKING
2831 * to get into the tcpip_thread
2832 */
2833 static void
2834 lwip_getsockopt_callback(void *arg)
2835 {
2836 struct lwip_setgetsockopt_data *data;
2837 LWIP_ASSERT("arg != NULL", arg != NULL);
2838 data = (struct lwip_setgetsockopt_data *)arg;
2839
2840 data->err = lwip_getsockopt_impl(data->s, data->level, data->optname,
2841 #if LWIP_MPU_COMPATIBLE
2842 data->optval,
2843 #else /* LWIP_MPU_COMPATIBLE */
2844 data->optval.p,
2845 #endif /* LWIP_MPU_COMPATIBLE */
2846 &data->optlen);
2847
2848 sys_sem_signal((sys_sem_t *)(data->completed_sem));
2849 }
2850 #endif /* LWIP_TCPIP_CORE_LOCKING */
2851
2852 static int
2853 lwip_sockopt_to_ipopt(int optname)
2854 {
2855 /* Map SO_* values to our internal SOF_* values
2856 * We should not rely on #defines in socket.h
2857 * being in sync with ip.h.
2858 */
2859 switch (optname) {
2860 case SO_BROADCAST:
2861 return SOF_BROADCAST;
2862 case SO_KEEPALIVE:
2863 return SOF_KEEPALIVE;
2864 case SO_REUSEADDR:
2865 return SOF_REUSEADDR;
2866 default:
2867 LWIP_ASSERT("Unknown socket option", 0);
2868 return 0;
2869 }
2870 }
2871
2872 /** lwip_getsockopt_impl: the actual implementation of getsockopt:
2873 * same arguments as lwip_getsockopt, either called directly or through callback
2874 */
2875 static int
2876 lwip_getsockopt_impl(int s, int level, int optname, void *optval, socklen_t *optlen)
2877 {
2878 int err = 0;
2879 struct lwip_sock *sock = tryget_socket(s);
2880 if (!sock) {
2881 return EBADF;
2882 }
2883
2884 #ifdef LWIP_HOOK_SOCKETS_GETSOCKOPT
2885 if (LWIP_HOOK_SOCKETS_GETSOCKOPT(s, sock, level, optname, optval, optlen, &err)) {
2886 return err;
2887 }
2888 #endif
2889
2890 switch (level) {
2891
2892 /* Level: SOL_SOCKET */
2893 case SOL_SOCKET:
2894 switch (optname) {
2895
2896 #if LWIP_TCP
2897 case SO_ACCEPTCONN:
2898 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
2899 if (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_TCP) {
2900 done_socket(sock);
2901 return ENOPROTOOPT;
2902 }
2903 if ((sock->conn->pcb.tcp != NULL) && (sock->conn->pcb.tcp->state == LISTEN)) {
2904 *(int *)optval = 1;
2905 } else {
2906 *(int *)optval = 0;
2907 }
2908 break;
2909 #endif /* LWIP_TCP */
2910
2911 /* The option flags */
2912 case SO_BROADCAST:
2913 case SO_KEEPALIVE:
2914 #if SO_REUSE
2915 case SO_REUSEADDR:
2916 #endif /* SO_REUSE */
2917 if ((optname == SO_BROADCAST) &&
2918 (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_UDP)) {
2919 done_socket(sock);
2920 return ENOPROTOOPT;
2921 }
2922
2923 optname = lwip_sockopt_to_ipopt(optname);
2924
2925 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
2926 *(int *)optval = ip_get_option(sock->conn->pcb.ip, optname);
2927 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, optname=0x%x, ..) = %s\n",
2928 s, optname, (*(int *)optval ? "on" : "off")));
2929 break;
2930
2931 case SO_TYPE:
2932 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int);
2933 switch (NETCONNTYPE_GROUP(netconn_type(sock->conn))) {
2934 case NETCONN_RAW:
2935 *(int *)optval = SOCK_RAW;
2936 break;
2937 case NETCONN_TCP:
2938 *(int *)optval = SOCK_STREAM;
2939 break;
2940 case NETCONN_UDP:
2941 *(int *)optval = SOCK_DGRAM;
2942 break;
2943 default: /* unrecognized socket type */
2944 *(int *)optval = netconn_type(sock->conn);
2945 LWIP_DEBUGF(SOCKETS_DEBUG,
2946 ("lwip_getsockopt(%d, SOL_SOCKET, SO_TYPE): unrecognized socket type %d\n",
2947 s, *(int *)optval));
2948 } /* switch (netconn_type(sock->conn)) */
2949 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, SO_TYPE) = %d\n",
2950 s, *(int *)optval));
2951 break;
2952
2953 case SO_ERROR:
2954 LWIP_SOCKOPT_CHECK_OPTLEN(sock, *optlen, int);
2955 *(int *)optval = err_to_errno(netconn_err(sock->conn));
2956 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, SO_ERROR) = %d\n",
2957 s, *(int *)optval));
2958 break;
2959
2960 #if LWIP_SO_SNDTIMEO
2961 case SO_SNDTIMEO:
2962 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE);
2963 LWIP_SO_SNDRCVTIMEO_SET(optval, netconn_get_sendtimeout(sock->conn));
2964 break;
2965 #endif /* LWIP_SO_SNDTIMEO */
2966 #if LWIP_SO_RCVTIMEO
2967 case SO_RCVTIMEO:
2968 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE);
2969 LWIP_SO_SNDRCVTIMEO_SET(optval, netconn_get_recvtimeout(sock->conn));
2970 break;
2971 #endif /* LWIP_SO_RCVTIMEO */
2972 #if LWIP_SO_RCVBUF
2973 case SO_RCVBUF:
2974 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int);
2975 *(int *)optval = netconn_get_recvbufsize(sock->conn);
2976 break;
2977 #endif /* LWIP_SO_RCVBUF */
2978 #if LWIP_SO_LINGER
2979 case SO_LINGER: {
2980 s16_t conn_linger;
2981 struct linger *linger = (struct linger *)optval;
2982 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, struct linger);
2983 conn_linger = sock->conn->linger;
2984 if (conn_linger >= 0) {
2985 linger->l_onoff = 1;
2986 linger->l_linger = (int)conn_linger;
2987 } else {
2988 linger->l_onoff = 0;
2989 linger->l_linger = 0;
2990 }
2991 }
2992 break;
2993 #endif /* LWIP_SO_LINGER */
2994 #if LWIP_UDP
2995 case SO_NO_CHECK:
2996 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_UDP);
2997 #if LWIP_UDPLITE
2998 if (udp_is_flag_set(sock->conn->pcb.udp, UDP_FLAGS_UDPLITE)) {
2999 /* this flag is only available for UDP, not for UDP lite */
3000 done_socket(sock);
3001 return EAFNOSUPPORT;
3002 }
3003 #endif /* LWIP_UDPLITE */
3004 *(int *)optval = udp_is_flag_set(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM) ? 1 : 0;
3005 break;
3006 #endif /* LWIP_UDP */
3007 default:
3008 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, SOL_SOCKET, UNIMPL: optname=0x%x, ..)\n",
3009 s, optname));
3010 err = ENOPROTOOPT;
3011 break;
3012 } /* switch (optname) */
3013 break;
3014
3015 /* Level: IPPROTO_IP */
3016 case IPPROTO_IP:
3017 switch (optname) {
3018 case IP_TTL:
3019 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
3020 *(int *)optval = sock->conn->pcb.ip->ttl;
3021 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_TTL) = %d\n",
3022 s, *(int *)optval));
3023 break;
3024 case IP_TOS:
3025 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
3026 *(int *)optval = sock->conn->pcb.ip->tos;
3027 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_TOS) = %d\n",
3028 s, *(int *)optval));
3029 break;
3030 #if LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP
3031 case IP_MULTICAST_TTL:
3032 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, u8_t);
3033 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_UDP) {
3034 done_socket(sock);
3035 return ENOPROTOOPT;
3036 }
3037 *(u8_t *)optval = udp_get_multicast_ttl(sock->conn->pcb.udp);
3038 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_TTL) = %d\n",
3039 s, *(int *)optval));
3040 break;
3041 case IP_MULTICAST_IF:
3042 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, struct in_addr);
3043 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_UDP) {
3044 done_socket(sock);
3045 return ENOPROTOOPT;
3046 }
3047 inet_addr_from_ip4addr((struct in_addr *)optval, udp_get_multicast_netif_addr(sock->conn->pcb.udp));
3048 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_IF) = 0x%"X32_F"\n",
3049 s, *(u32_t *)optval));
3050 break;
3051 case IP_MULTICAST_LOOP:
3052 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, u8_t);
3053 if ((sock->conn->pcb.udp->flags & UDP_FLAGS_MULTICAST_LOOP) != 0) {
3054 *(u8_t *)optval = 1;
3055 } else {
3056 *(u8_t *)optval = 0;
3057 }
3058 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, IP_MULTICAST_LOOP) = %d\n",
3059 s, *(int *)optval));
3060 break;
3061 #endif /* LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP */
3062 default:
3063 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IP, UNIMPL: optname=0x%x, ..)\n",
3064 s, optname));
3065 err = ENOPROTOOPT;
3066 break;
3067 } /* switch (optname) */
3068 break;
3069
3070 #if LWIP_TCP
3071 /* Level: IPPROTO_TCP */
3072 case IPPROTO_TCP:
3073 /* Special case: all IPPROTO_TCP options take an int */
3074 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_TCP);
3075 if (sock->conn->pcb.tcp->state == LISTEN) {
3076 done_socket(sock);
3077 return EINVAL;
3078 }
3079 switch (optname) {
3080 case TCP_NODELAY:
3081 *(int *)optval = tcp_nagle_disabled(sock->conn->pcb.tcp);
3082 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_NODELAY) = %s\n",
3083 s, (*(int *)optval) ? "on" : "off") );
3084 break;
3085 case TCP_KEEPALIVE:
3086 *(int *)optval = (int)sock->conn->pcb.tcp->keep_idle;
3087 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPALIVE) = %d\n",
3088 s, *(int *)optval));
3089 break;
3090
3091 #if LWIP_TCP_KEEPALIVE
3092 case TCP_KEEPIDLE:
3093 *(int *)optval = (int)(sock->conn->pcb.tcp->keep_idle / 1000);
3094 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPIDLE) = %d\n",
3095 s, *(int *)optval));
3096 break;
3097 case TCP_KEEPINTVL:
3098 *(int *)optval = (int)(sock->conn->pcb.tcp->keep_intvl / 1000);
3099 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPINTVL) = %d\n",
3100 s, *(int *)optval));
3101 break;
3102 case TCP_KEEPCNT:
3103 *(int *)optval = (int)sock->conn->pcb.tcp->keep_cnt;
3104 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, TCP_KEEPCNT) = %d\n",
3105 s, *(int *)optval));
3106 break;
3107 #endif /* LWIP_TCP_KEEPALIVE */
3108 default:
3109 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_TCP, UNIMPL: optname=0x%x, ..)\n",
3110 s, optname));
3111 err = ENOPROTOOPT;
3112 break;
3113 } /* switch (optname) */
3114 break;
3115 #endif /* LWIP_TCP */
3116
3117 #if LWIP_IPV6
3118 /* Level: IPPROTO_IPV6 */
3119 case IPPROTO_IPV6:
3120 switch (optname) {
3121 case IPV6_V6ONLY:
3122 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, *optlen, int);
3123 *(int *)optval = (netconn_get_ipv6only(sock->conn) ? 1 : 0);
3124 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IPV6, IPV6_V6ONLY) = %d\n",
3125 s, *(int *)optval));
3126 break;
3127 default:
3128 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_IPV6, UNIMPL: optname=0x%x, ..)\n",
3129 s, optname));
3130 err = ENOPROTOOPT;
3131 break;
3132 } /* switch (optname) */
3133 break;
3134 #endif /* LWIP_IPV6 */
3135
3136 #if LWIP_UDP && LWIP_UDPLITE
3137 /* Level: IPPROTO_UDPLITE */
3138 case IPPROTO_UDPLITE:
3139 /* Special case: all IPPROTO_UDPLITE options take an int */
3140 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, *optlen, int);
3141 /* If this is not a UDP lite socket, ignore any options. */
3142 if (!NETCONNTYPE_ISUDPLITE(netconn_type(sock->conn))) {
3143 done_socket(sock);
3144 return ENOPROTOOPT;
3145 }
3146 switch (optname) {
3147 case UDPLITE_SEND_CSCOV:
3148 *(int *)optval = sock->conn->pcb.udp->chksum_len_tx;
3149 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV) = %d\n",
3150 s, (*(int *)optval)) );
3151 break;
3152 case UDPLITE_RECV_CSCOV:
3153 *(int *)optval = sock->conn->pcb.udp->chksum_len_rx;
3154 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV) = %d\n",
3155 s, (*(int *)optval)) );
3156 break;
3157 default:
3158 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_UDPLITE, UNIMPL: optname=0x%x, ..)\n",
3159 s, optname));
3160 err = ENOPROTOOPT;
3161 break;
3162 } /* switch (optname) */
3163 break;
3164 #endif /* LWIP_UDP && LWIP_UDPLITE */
3165 /* Level: IPPROTO_RAW */
3166 case IPPROTO_RAW:
3167 switch (optname) {
3168 #if LWIP_IPV6 && LWIP_RAW
3169 case IPV6_CHECKSUM:
3170 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, *optlen, int, NETCONN_RAW);
3171 if (sock->conn->pcb.raw->chksum_reqd == 0) {
3172 *(int *)optval = -1;
3173 } else {
3174 *(int *)optval = sock->conn->pcb.raw->chksum_offset;
3175 }
3176 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_RAW, IPV6_CHECKSUM) = %d\n",
3177 s, (*(int *)optval)) );
3178 break;
3179 #endif /* LWIP_IPV6 && LWIP_RAW */
3180 default:
3181 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, IPPROTO_RAW, UNIMPL: optname=0x%x, ..)\n",
3182 s, optname));
3183 err = ENOPROTOOPT;
3184 break;
3185 } /* switch (optname) */
3186 break;
3187 default:
3188 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_getsockopt(%d, level=0x%x, UNIMPL: optname=0x%x, ..)\n",
3189 s, level, optname));
3190 err = ENOPROTOOPT;
3191 break;
3192 } /* switch (level) */
3193
3194 done_socket(sock);
3195 return err;
3196 }
3197
3198 int
3199 lwip_setsockopt(int s, int level, int optname, const void *optval, socklen_t optlen)
3200 {
3201 int err = 0;
3202 struct lwip_sock *sock = get_socket(s);
3203 #if !LWIP_TCPIP_CORE_LOCKING
3204 err_t cberr;
3205 LWIP_SETGETSOCKOPT_DATA_VAR_DECLARE(data);
3206 #endif /* !LWIP_TCPIP_CORE_LOCKING */
3207
3208 if (!sock) {
3209 return -1;
3210 }
3211
3212 if (NULL == optval) {
3213 set_errno(EFAULT);
3214 done_socket(sock);
3215 return -1;
3216 }
3217
3218 #if LWIP_TCPIP_CORE_LOCKING
3219 /* core-locking can just call the -impl function */
3220 LOCK_TCPIP_CORE();
3221 err = lwip_setsockopt_impl(s, level, optname, optval, optlen);
3222 UNLOCK_TCPIP_CORE();
3223
3224 #else /* LWIP_TCPIP_CORE_LOCKING */
3225
3226 #if LWIP_MPU_COMPATIBLE
3227 /* MPU_COMPATIBLE copies the optval data, so check for max size here */
3228 if (optlen > LWIP_SETGETSOCKOPT_MAXOPTLEN) {
3229 set_errno(ENOBUFS);
3230 done_socket(sock);
3231 return -1;
3232 }
3233 #endif /* LWIP_MPU_COMPATIBLE */
3234
3235 LWIP_SETGETSOCKOPT_DATA_VAR_ALLOC(data, sock);
3236 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).s = s;
3237 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).level = level;
3238 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optname = optname;
3239 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optlen = optlen;
3240 #if LWIP_MPU_COMPATIBLE
3241 MEMCPY(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval, optval, optlen);
3242 #else /* LWIP_MPU_COMPATIBLE */
3243 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).optval.pc = (const void *)optval;
3244 #endif /* LWIP_MPU_COMPATIBLE */
3245 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err = 0;
3246 #if LWIP_NETCONN_SEM_PER_THREAD
3247 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = LWIP_NETCONN_THREAD_SEM_GET();
3248 #else
3249 LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem = &sock->conn->op_completed;
3250 #endif
3251 cberr = tcpip_callback(lwip_setsockopt_callback, &LWIP_SETGETSOCKOPT_DATA_VAR_REF(data));
3252 if (cberr != ERR_OK) {
3253 LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data);
3254 set_errno(err_to_errno(cberr));
3255 done_socket(sock);
3256 return -1;
3257 }
3258 sys_arch_sem_wait((sys_sem_t *)(LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).completed_sem), 0);
3259
3260 /* maybe lwip_setsockopt_impl has changed err */
3261 err = LWIP_SETGETSOCKOPT_DATA_VAR_REF(data).err;
3262 LWIP_SETGETSOCKOPT_DATA_VAR_FREE(data);
3263 #endif /* LWIP_TCPIP_CORE_LOCKING */
3264
3265 set_errno(err);
3266 done_socket(sock);
3267 return err ? -1 : 0;
3268 }
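/* Illustrative usage sketch (not part of lwIP): two common option writes.
 * TCP_NODELAY disables the Nagle algorithm on a TCP socket; SO_RCVTIMEO sets a
 * receive timeout (shown as struct timeval, the default
 * LWIP_SO_SNDRCVTIMEO_OPTTYPE; with LWIP_SO_SNDRCVTIMEO_NONSTANDARD it is an
 * int in milliseconds). 'tcp_sock' is a placeholder and LWIP_SO_RCVTIMEO must
 * be enabled for the second call.
 *
 *   int on = 1;
 *   struct timeval tv = { 2, 0 };
 *   lwip_setsockopt(tcp_sock, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on));
 *   lwip_setsockopt(tcp_sock, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 */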
3269
3270 #if !LWIP_TCPIP_CORE_LOCKING
3271 /** lwip_setsockopt_callback: only used without CORE_LOCKING
3272 * to get into the tcpip_thread
3273 */
3274 static void
3275 lwip_setsockopt_callback(void *arg)
3276 {
3277 struct lwip_setgetsockopt_data *data;
3278 LWIP_ASSERT("arg != NULL", arg != NULL);
3279 data = (struct lwip_setgetsockopt_data *)arg;
3280
3281 data->err = lwip_setsockopt_impl(data->s, data->level, data->optname,
3282 #if LWIP_MPU_COMPATIBLE
3283 data->optval,
3284 #else /* LWIP_MPU_COMPATIBLE */
3285 data->optval.pc,
3286 #endif /* LWIP_MPU_COMPATIBLE */
3287 data->optlen);
3288
3289 sys_sem_signal((sys_sem_t *)(data->completed_sem));
3290 }
3291 #endif /* LWIP_TCPIP_CORE_LOCKING */
3292
3293 /** lwip_setsockopt_impl: the actual implementation of setsockopt:
3294 * same argument as lwip_setsockopt, either called directly or through callback
3295 */
3296 static int
3297 lwip_setsockopt_impl(int s, int level, int optname, const void *optval, socklen_t optlen)
3298 {
3299 int err = 0;
3300 struct lwip_sock *sock = tryget_socket(s);
3301 if (!sock) {
3302 return EBADF;
3303 }
3304
3305 #ifdef LWIP_HOOK_SOCKETS_SETSOCKOPT
3306 if (LWIP_HOOK_SOCKETS_SETSOCKOPT(s, sock, level, optname, optval, optlen, &err)) {
3307 return err;
3308 }
3309 #endif
3310
3311 switch (level) {
3312
3313 /* Level: SOL_SOCKET */
3314 case SOL_SOCKET:
3315 switch (optname) {
3316
3317 /* SO_ACCEPTCONN is get-only */
3318
3319 /* The option flags */
3320 case SO_BROADCAST:
3321 case SO_KEEPALIVE:
3322 #if SO_REUSE
3323 case SO_REUSEADDR:
3324 #endif /* SO_REUSE */
3325 if ((optname == SO_BROADCAST) &&
3326 (NETCONNTYPE_GROUP(sock->conn->type) != NETCONN_UDP)) {
3327 done_socket(sock);
3328 return ENOPROTOOPT;
3329 }
3330
3331 optname = lwip_sockopt_to_ipopt(optname);
3332
3333 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
3334 if (*(const int *)optval) {
3335 ip_set_option(sock->conn->pcb.ip, optname);
3336 } else {
3337 ip_reset_option(sock->conn->pcb.ip, optname);
3338 }
3339 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, SOL_SOCKET, optname=0x%x, ..) -> %s\n",
3340 s, optname, (*(const int *)optval ? "on" : "off")));
3341 break;
3342
3343 /* SO_TYPE is get-only */
3344 /* SO_ERROR is get-only */
3345
3346 #if LWIP_SO_SNDTIMEO
3347 case SO_SNDTIMEO: {
3348 long ms_long;
3349 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE);
3350 ms_long = LWIP_SO_SNDRCVTIMEO_GET_MS(optval);
3351 if (ms_long < 0) {
3352 done_socket(sock);
3353 return EINVAL;
3354 }
3355 netconn_set_sendtimeout(sock->conn, ms_long);
3356 break;
3357 }
3358 #endif /* LWIP_SO_SNDTIMEO */
3359 #if LWIP_SO_RCVTIMEO
3360 case SO_RCVTIMEO: {
3361 long ms_long;
3362 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, LWIP_SO_SNDRCVTIMEO_OPTTYPE);
3363 ms_long = LWIP_SO_SNDRCVTIMEO_GET_MS(optval);
3364 if (ms_long < 0) {
3365 done_socket(sock);
3366 return EINVAL;
3367 }
3368 netconn_set_recvtimeout(sock->conn, (u32_t)ms_long);
3369 break;
3370 }
3371 #endif /* LWIP_SO_RCVTIMEO */
3372 #if LWIP_SO_RCVBUF
3373 case SO_RCVBUF:
3374 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, int);
3375 netconn_set_recvbufsize(sock->conn, *(const int *)optval);
3376 break;
3377 #endif /* LWIP_SO_RCVBUF */
3378 #if LWIP_SO_LINGER
3379 case SO_LINGER: {
3380 const struct linger *linger = (const struct linger *)optval;
3381 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, struct linger);
3382 if (linger->l_onoff) {
3383 int lingersec = linger->l_linger;
3384 if (lingersec < 0) {
3385 done_socket(sock);
3386 return EINVAL;
3387 }
3388 if (lingersec > 0xFFFF) {
3389 lingersec = 0xFFFF;
3390 }
3391 sock->conn->linger = (s16_t)lingersec;
3392 } else {
3393 sock->conn->linger = -1;
3394 }
3395 }
3396 break;
3397 #endif /* LWIP_SO_LINGER */
3398 #if LWIP_UDP
3399 case SO_NO_CHECK:
3400 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_UDP);
3401 #if LWIP_UDPLITE
3402 if (udp_is_flag_set(sock->conn->pcb.udp, UDP_FLAGS_UDPLITE)) {
3403 /* this flag is only available for UDP, not for UDP lite */
3404 done_socket(sock);
3405 return EAFNOSUPPORT;
3406 }
3407 #endif /* LWIP_UDPLITE */
3408 if (*(const int *)optval) {
3409 udp_set_flags(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM);
3410 } else {
3411 udp_clear_flags(sock->conn->pcb.udp, UDP_FLAGS_NOCHKSUM);
3412 }
3413 break;
3414 #endif /* LWIP_UDP */
3415 case SO_BINDTODEVICE: {
3416 const struct ifreq *iface;
3417 struct netif *n = NULL;
3418
3419 LWIP_SOCKOPT_CHECK_OPTLEN_CONN(sock, optlen, struct ifreq);
3420
3421 iface = (const struct ifreq *)optval;
3422 if (iface->ifr_name[0] != 0) {
3423 n = netif_find(iface->ifr_name);
3424 if (n == NULL) {
3425 done_socket(sock);
3426 return ENODEV;
3427 }
3428 }
3429
3430 switch (NETCONNTYPE_GROUP(netconn_type(sock->conn))) {
3431 #if LWIP_TCP
3432 case NETCONN_TCP:
3433 tcp_bind_netif(sock->conn->pcb.tcp, n);
3434 break;
3435 #endif
3436 #if LWIP_UDP
3437 case NETCONN_UDP:
3438 udp_bind_netif(sock->conn->pcb.udp, n);
3439 break;
3440 #endif
3441 #if LWIP_RAW
3442 case NETCONN_RAW:
3443 raw_bind_netif(sock->conn->pcb.raw, n);
3444 break;
3445 #endif
3446 default:
3447 LWIP_ASSERT("Unhandled netconn type in SO_BINDTODEVICE", 0);
3448 break;
3449 }
3450 }
3451 break;
3452 default:
3453 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, SOL_SOCKET, UNIMPL: optname=0x%x, ..)\n",
3454 s, optname));
3455 err = ENOPROTOOPT;
3456 break;
3457 } /* switch (optname) */
3458 break;
3459
3460 /* Level: IPPROTO_IP */
3461 case IPPROTO_IP:
3462 switch (optname) {
3463 case IP_TTL:
3464 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
3465 sock->conn->pcb.ip->ttl = (u8_t)(*(const int *)optval);
3466 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, IP_TTL, ..) -> %d\n",
3467 s, sock->conn->pcb.ip->ttl));
3468 break;
3469 case IP_TOS:
3470 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
3471 sock->conn->pcb.ip->tos = (u8_t)(*(const int *)optval);
3472       LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, IP_TOS, ..) -> %d\n",
3473 s, sock->conn->pcb.ip->tos));
3474 break;
3475 #if LWIP_NETBUF_RECVINFO
3476 case IP_PKTINFO:
3477 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_UDP);
3478 if (*(const int *)optval) {
3479 sock->conn->flags |= NETCONN_FLAG_PKTINFO;
3480 } else {
3481 sock->conn->flags &= ~NETCONN_FLAG_PKTINFO;
3482 }
3483 break;
3484 #endif /* LWIP_NETBUF_RECVINFO */
3485 #if LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP
3486 case IP_MULTICAST_TTL:
3487 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, u8_t, NETCONN_UDP);
3488 udp_set_multicast_ttl(sock->conn->pcb.udp, (u8_t)(*(const u8_t *)optval));
3489 break;
3490 case IP_MULTICAST_IF: {
3491 ip4_addr_t if_addr;
3492 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct in_addr, NETCONN_UDP);
3493 inet_addr_to_ip4addr(&if_addr, (const struct in_addr *)optval);
3494 udp_set_multicast_netif_addr(sock->conn->pcb.udp, &if_addr);
3495 }
3496 break;
3497 case IP_MULTICAST_LOOP:
3498 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, u8_t, NETCONN_UDP);
3499 if (*(const u8_t *)optval) {
3500 udp_set_flags(sock->conn->pcb.udp, UDP_FLAGS_MULTICAST_LOOP);
3501 } else {
3502 udp_clear_flags(sock->conn->pcb.udp, UDP_FLAGS_MULTICAST_LOOP);
3503 }
3504 break;
3505 #endif /* LWIP_IPV4 && LWIP_MULTICAST_TX_OPTIONS && LWIP_UDP */
3506 #if LWIP_IGMP
3507 case IP_ADD_MEMBERSHIP:
3508 case IP_DROP_MEMBERSHIP: {
3509 /* If this is a TCP or a RAW socket, ignore these options. */
3510 err_t igmp_err;
3511 const struct ip_mreq *imr = (const struct ip_mreq *)optval;
3512 ip4_addr_t if_addr;
3513 ip4_addr_t multi_addr;
3514 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct ip_mreq, NETCONN_UDP);
3515 inet_addr_to_ip4addr(&if_addr, &imr->imr_interface);
3516 inet_addr_to_ip4addr(&multi_addr, &imr->imr_multiaddr);
3517 if (optname == IP_ADD_MEMBERSHIP) {
3518 if (!lwip_socket_register_membership(s, &if_addr, &multi_addr)) {
3519 /* cannot track membership (out of memory) */
3520 err = ENOMEM;
3521 igmp_err = ERR_OK;
3522 } else {
3523 igmp_err = igmp_joingroup(&if_addr, &multi_addr);
3524 }
3525 } else {
3526 igmp_err = igmp_leavegroup(&if_addr, &multi_addr);
3527 lwip_socket_unregister_membership(s, &if_addr, &multi_addr);
3528 }
3529 if (igmp_err != ERR_OK) {
3530 err = EADDRNOTAVAIL;
3531 }
3532 }
3533 break;
3534 #endif /* LWIP_IGMP */
3535 default:
3536 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IP, UNIMPL: optname=0x%x, ..)\n",
3537 s, optname));
3538 err = ENOPROTOOPT;
3539 break;
3540 } /* switch (optname) */
3541 break;
3542
3543 #if LWIP_TCP
3544 /* Level: IPPROTO_TCP */
3545 case IPPROTO_TCP:
3546       /* Special case: all IPPROTO_TCP options take an int */
3547 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_TCP);
3548 if (sock->conn->pcb.tcp->state == LISTEN) {
3549 done_socket(sock);
3550 return EINVAL;
3551 }
3552 switch (optname) {
3553 case TCP_NODELAY:
3554 if (*(const int *)optval) {
3555 tcp_nagle_disable(sock->conn->pcb.tcp);
3556 } else {
3557 tcp_nagle_enable(sock->conn->pcb.tcp);
3558 }
3559 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_NODELAY) -> %s\n",
3560 s, (*(const int *)optval) ? "on" : "off") );
3561 break;
3562 case TCP_KEEPALIVE:
3563 sock->conn->pcb.tcp->keep_idle = (u32_t)(*(const int *)optval);
3564 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPALIVE) -> %"U32_F"\n",
3565 s, sock->conn->pcb.tcp->keep_idle));
3566 break;
3567
3568 #if LWIP_TCP_KEEPALIVE
3569 case TCP_KEEPIDLE:
3570 sock->conn->pcb.tcp->keep_idle = 1000 * (u32_t)(*(const int *)optval);
3571 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPIDLE) -> %"U32_F"\n",
3572 s, sock->conn->pcb.tcp->keep_idle));
3573 break;
3574 case TCP_KEEPINTVL:
3575 sock->conn->pcb.tcp->keep_intvl = 1000 * (u32_t)(*(const int *)optval);
3576 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPINTVL) -> %"U32_F"\n",
3577 s, sock->conn->pcb.tcp->keep_intvl));
3578 break;
3579 case TCP_KEEPCNT:
3580 sock->conn->pcb.tcp->keep_cnt = (u32_t)(*(const int *)optval);
3581 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, TCP_KEEPCNT) -> %"U32_F"\n",
3582 s, sock->conn->pcb.tcp->keep_cnt));
3583 break;
3584 #endif /* LWIP_TCP_KEEPALIVE */
3585 default:
3586 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_TCP, UNIMPL: optname=0x%x, ..)\n",
3587 s, optname));
3588 err = ENOPROTOOPT;
3589 break;
3590 } /* switch (optname) */
3591 break;
3592 #endif /* LWIP_TCP */
3593
3594 #if LWIP_IPV6
3595 /* Level: IPPROTO_IPV6 */
3596 case IPPROTO_IPV6:
3597 switch (optname) {
3598 case IPV6_V6ONLY:
3599 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
3600 if (*(const int *)optval) {
3601 netconn_set_ipv6only(sock->conn, 1);
3602 } else {
3603 netconn_set_ipv6only(sock->conn, 0);
3604 }
3605 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IPV6, IPV6_V6ONLY, ..) -> %d\n",
3606 s, (netconn_get_ipv6only(sock->conn) ? 1 : 0)));
3607 break;
3608 #if LWIP_IPV6_MLD
3609 case IPV6_JOIN_GROUP:
3610 case IPV6_LEAVE_GROUP: {
3611 /* If this is a TCP or a RAW socket, ignore these options. */
3612 err_t mld6_err;
3613 struct netif *netif;
3614 ip6_addr_t multi_addr;
3615 const struct ipv6_mreq *imr = (const struct ipv6_mreq *)optval;
3616 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, struct ipv6_mreq, NETCONN_UDP);
3617 inet6_addr_to_ip6addr(&multi_addr, &imr->ipv6mr_multiaddr);
3618 LWIP_ASSERT("Invalid netif index", imr->ipv6mr_interface <= 0xFFu);
3619 netif = netif_get_by_index((u8_t)imr->ipv6mr_interface);
3620 if (netif == NULL) {
3621 err = EADDRNOTAVAIL;
3622 break;
3623 }
3624
3625 if (optname == IPV6_JOIN_GROUP) {
3626 if (!lwip_socket_register_mld6_membership(s, imr->ipv6mr_interface, &multi_addr)) {
3627 /* cannot track membership (out of memory) */
3628 err = ENOMEM;
3629 mld6_err = ERR_OK;
3630 } else {
3631 mld6_err = mld6_joingroup_netif(netif, &multi_addr);
3632 }
3633 } else {
3634 mld6_err = mld6_leavegroup_netif(netif, &multi_addr);
3635 lwip_socket_unregister_mld6_membership(s, imr->ipv6mr_interface, &multi_addr);
3636 }
3637 if (mld6_err != ERR_OK) {
3638 err = EADDRNOTAVAIL;
3639 }
3640 }
3641 break;
3642 #endif /* LWIP_IPV6_MLD */
3643 default:
3644 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_IPV6, UNIMPL: optname=0x%x, ..)\n",
3645 s, optname));
3646 err = ENOPROTOOPT;
3647 break;
3648 } /* switch (optname) */
3649 break;
3650 #endif /* LWIP_IPV6 */
3651
3652 #if LWIP_UDP && LWIP_UDPLITE
3653 /* Level: IPPROTO_UDPLITE */
3654 case IPPROTO_UDPLITE:
3655       /* Special case: all IPPROTO_UDPLITE options take an int */
3656       LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB(sock, optlen, int);
3657       /* If this is not a UDP lite socket, ignore any options. */
3658 if (!NETCONNTYPE_ISUDPLITE(netconn_type(sock->conn))) {
3659 done_socket(sock);
3660 return ENOPROTOOPT;
3661 }
3662 switch (optname) {
3663 case UDPLITE_SEND_CSCOV:
3664 if ((*(const int *)optval != 0) && ((*(const int *)optval < 8) || (*(const int *)optval > 0xffff))) {
3665 /* don't allow illegal values! */
3666 sock->conn->pcb.udp->chksum_len_tx = 8;
3667 } else {
3668 sock->conn->pcb.udp->chksum_len_tx = (u16_t) * (const int *)optval;
3669 }
3670 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UDPLITE_SEND_CSCOV) -> %d\n",
3671 s, (*(const int *)optval)) );
3672 break;
3673 case UDPLITE_RECV_CSCOV:
3674 if ((*(const int *)optval != 0) && ((*(const int *)optval < 8) || (*(const int *)optval > 0xffff))) {
3675 /* don't allow illegal values! */
3676 sock->conn->pcb.udp->chksum_len_rx = 8;
3677 } else {
3678 sock->conn->pcb.udp->chksum_len_rx = (u16_t) * (const int *)optval;
3679 }
3680 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UDPLITE_RECV_CSCOV) -> %d\n",
3681 s, (*(const int *)optval)) );
3682 break;
3683 default:
3684 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_UDPLITE, UNIMPL: optname=0x%x, ..)\n",
3685 s, optname));
3686 err = ENOPROTOOPT;
3687 break;
3688 } /* switch (optname) */
3689 break;
3690 #endif /* LWIP_UDP && LWIP_UDPLITE */
3691 /* Level: IPPROTO_RAW */
3692 case IPPROTO_RAW:
3693 switch (optname) {
3694 #if LWIP_IPV6 && LWIP_RAW
3695 case IPV6_CHECKSUM:
3696 /* It should not be possible to disable the checksum generation with ICMPv6
3697 * as per RFC 3542 chapter 3.1 */
3698 if (sock->conn->pcb.raw->protocol == IPPROTO_ICMPV6) {
3699 done_socket(sock);
3700 return EINVAL;
3701 }
3702
3703 LWIP_SOCKOPT_CHECK_OPTLEN_CONN_PCB_TYPE(sock, optlen, int, NETCONN_RAW);
3704 if (*(const int *)optval < 0) {
3705 sock->conn->pcb.raw->chksum_reqd = 0;
3706 } else if (*(const int *)optval & 1) {
3707 /* Per RFC3542, odd offsets are not allowed */
3708 done_socket(sock);
3709 return EINVAL;
3710 } else {
3711 sock->conn->pcb.raw->chksum_reqd = 1;
3712 sock->conn->pcb.raw->chksum_offset = (u16_t) * (const int *)optval;
3713 }
3714 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_RAW, IPV6_CHECKSUM, ..) -> %d\n",
3715 s, sock->conn->pcb.raw->chksum_reqd));
3716 break;
3717 #endif /* LWIP_IPV6 && LWIP_RAW */
3718 default:
3719 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, IPPROTO_RAW, UNIMPL: optname=0x%x, ..)\n",
3720 s, optname));
3721 err = ENOPROTOOPT;
3722 break;
3723 } /* switch (optname) */
3724 break;
3725 default:
3726 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_setsockopt(%d, level=0x%x, UNIMPL: optname=0x%x, ..)\n",
3727 s, level, optname));
3728 err = ENOPROTOOPT;
3729 break;
3730 } /* switch (level) */
3731
3732 done_socket(sock);
3733 return err;
3734 }
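
/* Usage sketch (hypothetical application code, not part of this file):
 * joining an IPv4 multicast group on a UDP socket. This exercises the
 * IP_ADD_MEMBERSHIP branch above, which records the membership for automatic
 * cleanup on close and then calls igmp_joingroup(). Assumes LWIP_IGMP is
 * enabled; the group address is an example only.
 *
 *   int fd = lwip_socket(AF_INET, SOCK_DGRAM, 0);
 *   struct ip_mreq mreq;
 *   mreq.imr_multiaddr.s_addr = inet_addr("239.0.0.1");
 *   mreq.imr_interface.s_addr = INADDR_ANY;
 *   if (lwip_setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq)) < 0) {
 *     // EADDRNOTAVAIL: the join failed; ENOMEM: no free membership slot
 *   }
 */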
3735
3736 int
3737 lwip_ioctl(int s, long cmd, void *argp)
3738 {
3739 struct lwip_sock *sock = get_socket(s);
3740 u8_t val;
3741 #if LWIP_SO_RCVBUF
3742 int recv_avail;
3743 #endif /* LWIP_SO_RCVBUF */
3744
3745 if (!sock) {
3746 return -1;
3747 }
3748
3749 switch (cmd) {
3750 #if LWIP_SO_RCVBUF || LWIP_FIONREAD_LINUXMODE
3751 case FIONREAD:
3752 if (!argp) {
3753 set_errno(EINVAL);
3754 done_socket(sock);
3755 return -1;
3756 }
3757 #if LWIP_FIONREAD_LINUXMODE
3758 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) != NETCONN_TCP) {
3759 struct netbuf *nb;
3760 if (sock->lastdata.netbuf) {
3761 nb = sock->lastdata.netbuf;
3762 *((int *)argp) = nb->p->tot_len;
3763 } else {
3764 struct netbuf *rxbuf;
3765 err_t err = netconn_recv_udp_raw_netbuf_flags(sock->conn, &rxbuf, NETCONN_DONTBLOCK);
3766 if (err != ERR_OK) {
3767 *((int *)argp) = 0;
3768 } else {
3769 sock->lastdata.netbuf = rxbuf;
3770 *((int *)argp) = rxbuf->p->tot_len;
3771 }
3772 }
3773 done_socket(sock);
3774 return 0;
3775 }
3776 #endif /* LWIP_FIONREAD_LINUXMODE */
3777
3778 #if LWIP_SO_RCVBUF
3779 /* we come here if either LWIP_FIONREAD_LINUXMODE==0 or this is a TCP socket */
3780 SYS_ARCH_GET(sock->conn->recv_avail, recv_avail);
3781 if (recv_avail < 0) {
3782 recv_avail = 0;
3783 }
3784
3785 /* Check if there is data left from the last recv operation. /maq 041215 */
3786 if (sock->lastdata.netbuf) {
3787 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
3788 recv_avail += sock->lastdata.pbuf->tot_len;
3789 } else {
3790 recv_avail += sock->lastdata.netbuf->p->tot_len;
3791 }
3792 }
3793 *((int *)argp) = recv_avail;
3794
3795 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, FIONREAD, %p) = %"U16_F"\n", s, argp, *((u16_t *)argp)));
3796 set_errno(0);
3797 done_socket(sock);
3798 return 0;
3799 #else /* LWIP_SO_RCVBUF */
3800 break;
3801 #endif /* LWIP_SO_RCVBUF */
3802 #endif /* LWIP_SO_RCVBUF || LWIP_FIONREAD_LINUXMODE */
3803
3804 case (long)FIONBIO:
3805 val = 0;
3806 if (argp && *(int *)argp) {
3807 val = 1;
3808 }
3809 netconn_set_nonblocking(sock->conn, val);
3810 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, FIONBIO, %d)\n", s, val));
3811 set_errno(0);
3812 done_socket(sock);
3813 return 0;
3814
3815 default:
3816 break;
3817 } /* switch (cmd) */
3818 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_ioctl(%d, UNIMPL: 0x%lx, %p)\n", s, cmd, argp));
3819 set_errno(ENOSYS); /* not yet implemented */
3820 done_socket(sock);
3821 return -1;
3822 }
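
/* Usage sketch (hypothetical application code, not part of this file):
 * the two ioctl commands implemented above. FIONBIO toggles non-blocking
 * mode; FIONREAD reports how many bytes can be received without blocking
 * (only available with LWIP_SO_RCVBUF or LWIP_FIONREAD_LINUXMODE).
 *
 *   int fd = lwip_socket(AF_INET, SOCK_DGRAM, 0);
 *   int on = 1;
 *   int avail = 0;
 *   lwip_ioctl(fd, FIONBIO, &on);        // switch to non-blocking mode
 *   if (lwip_ioctl(fd, FIONREAD, &avail) == 0) {
 *     // avail now holds the number of readable bytes
 *   }
 */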
3823
3824 /** A minimal implementation of fcntl.
3825 * Currently only the commands F_GETFL and F_SETFL are implemented.
3826  * The flag O_NONBLOCK and the access modes are supported for F_GETFL;
3827  * only the flag O_NONBLOCK is implemented for F_SETFL.
3828 */
3829 int
3830 lwip_fcntl(int s, int cmd, int val)
3831 {
3832 struct lwip_sock *sock = get_socket(s);
3833 int ret = -1;
3834 int op_mode = 0;
3835
3836 if (!sock) {
3837 return -1;
3838 }
3839
3840 switch (cmd) {
3841 case F_GETFL:
3842 ret = netconn_is_nonblocking(sock->conn) ? O_NONBLOCK : 0;
3843 set_errno(0);
3844
3845 if (NETCONNTYPE_GROUP(netconn_type(sock->conn)) == NETCONN_TCP) {
3846 #if LWIP_TCPIP_CORE_LOCKING
3847 LOCK_TCPIP_CORE();
3848 #else
3849 SYS_ARCH_DECL_PROTECT(lev);
3850 /* the proper thing to do here would be to get into the tcpip_thread,
3851 but locking should be OK as well since we only *read* some flags */
3852 SYS_ARCH_PROTECT(lev);
3853 #endif
3854 #if LWIP_TCP
3855 if (sock->conn->pcb.tcp) {
3856 if (!(sock->conn->pcb.tcp->flags & TF_RXCLOSED)) {
3857 op_mode |= O_RDONLY;
3858 }
3859 if (!(sock->conn->pcb.tcp->flags & TF_FIN)) {
3860 op_mode |= O_WRONLY;
3861 }
3862 }
3863 #endif
3864 #if LWIP_TCPIP_CORE_LOCKING
3865 UNLOCK_TCPIP_CORE();
3866 #else
3867 SYS_ARCH_UNPROTECT(lev);
3868 #endif
3869 } else {
3870 op_mode |= O_RDWR;
3871 }
3872
3873 /* ensure O_RDWR for (O_RDONLY|O_WRONLY) != O_RDWR cases */
3874 ret |= (op_mode == (O_RDONLY | O_WRONLY)) ? O_RDWR : op_mode;
3875
3876 break;
3877 case F_SETFL:
3878 /* Bits corresponding to the file access mode and the file creation flags [..] that are set in arg shall be ignored */
3879 val &= ~(O_RDONLY | O_WRONLY | O_RDWR);
3880 if ((val & ~O_NONBLOCK) == 0) {
3881 /* only O_NONBLOCK, all other bits are zero */
3882 netconn_set_nonblocking(sock->conn, val & O_NONBLOCK);
3883 ret = 0;
3884 set_errno(0);
3885 } else {
3886 set_errno(ENOSYS); /* not yet implemented */
3887 }
3888 break;
3889 default:
3890 LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_fcntl(%d, UNIMPL: %d, %d)\n", s, cmd, val));
3891 set_errno(ENOSYS); /* not yet implemented */
3892 break;
3893 }
3894 done_socket(sock);
3895 return ret;
3896 }
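
/* Usage sketch (hypothetical application code, not part of this file):
 * toggling O_NONBLOCK with the minimal fcntl implementation above, where fd
 * is a socket descriptor obtained from lwip_socket(). Only O_NONBLOCK may be
 * changed via F_SETFL; any other flag bit makes the call fail with ENOSYS.
 *
 *   int flags = lwip_fcntl(fd, F_GETFL, 0);
 *   if (flags >= 0) {
 *     lwip_fcntl(fd, F_SETFL, flags | O_NONBLOCK);
 *   }
 */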
3897
3898 #if LWIP_COMPAT_SOCKETS == 2 && LWIP_POSIX_SOCKETS_IO_NAMES
3899 int
3900 fcntl(int s, int cmd, ...)
3901 {
3902 va_list ap;
3903 int val;
3904
3905 va_start(ap, cmd);
3906 val = va_arg(ap, int);
3907 va_end(ap);
3908 return lwip_fcntl(s, cmd, val);
3909 }
3910 #endif
3911
3912 const char *
3913 lwip_inet_ntop(int af, const void *src, char *dst, socklen_t size)
3914 {
3915 const char *ret = NULL;
3916 int size_int = (int)size;
3917 if (size_int < 0) {
3918 set_errno(ENOSPC);
3919 return NULL;
3920 }
3921 switch (af) {
3922 #if LWIP_IPV4
3923 case AF_INET:
3924 ret = ip4addr_ntoa_r((const ip4_addr_t *)src, dst, size_int);
3925 if (ret == NULL) {
3926 set_errno(ENOSPC);
3927 }
3928 break;
3929 #endif
3930 #if LWIP_IPV6
3931 case AF_INET6:
3932 ret = ip6addr_ntoa_r((const ip6_addr_t *)src, dst, size_int);
3933 if (ret == NULL) {
3934 set_errno(ENOSPC);
3935 }
3936 break;
3937 #endif
3938 default:
3939 set_errno(EAFNOSUPPORT);
3940 break;
3941 }
3942 return ret;
3943 }
3944
3945 int
3946 lwip_inet_pton(int af, const char *src, void *dst)
3947 {
3948 int err;
3949 switch (af) {
3950 #if LWIP_IPV4
3951 case AF_INET:
3952 err = ip4addr_aton(src, (ip4_addr_t *)dst);
3953 break;
3954 #endif
3955 #if LWIP_IPV6
3956 case AF_INET6: {
3957 /* convert into temporary variable since ip6_addr_t might be larger
3958 than in6_addr when scopes are enabled */
3959 ip6_addr_t addr;
3960 err = ip6addr_aton(src, &addr);
3961 if (err) {
3962 memcpy(dst, &addr.addr, sizeof(addr.addr));
3963 }
3964 break;
3965 }
3966 #endif
3967 default:
3968 err = -1;
3969 set_errno(EAFNOSUPPORT);
3970 break;
3971 }
3972 return err;
3973 }
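
/* Usage sketch (hypothetical application code, not part of this file):
 * converting between text and binary address forms with the two helpers
 * above. Like the standard functions, lwip_inet_pton() returns 1 on success
 * and 0 for an unparsable string; lwip_inet_ntop() returns NULL and sets
 * errno to ENOSPC when the buffer is too small.
 *
 *   ip4_addr_t addr;
 *   char buf[IP4ADDR_STRLEN_MAX];
 *   if (lwip_inet_pton(AF_INET, "192.168.1.1", &addr) == 1) {
 *     if (lwip_inet_ntop(AF_INET, &addr, buf, sizeof(buf)) != NULL) {
 *       // buf now holds "192.168.1.1" again
 *     }
 *   }
 */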
3974
3975 #if LWIP_IGMP
3976 /** Register a new IGMP membership. On socket close, the membership is dropped automatically.
3977 *
3978 * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK).
3979 *
3980 * @return 1 on success, 0 on failure
3981 */
3982 static int
3983 lwip_socket_register_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr)
3984 {
3985 struct lwip_sock *sock = get_socket(s);
3986 int i;
3987
3988 if (!sock) {
3989 return 0;
3990 }
3991
3992 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
3993 if (socket_ipv4_multicast_memberships[i].sock == NULL) {
3994 socket_ipv4_multicast_memberships[i].sock = sock;
3995 ip4_addr_copy(socket_ipv4_multicast_memberships[i].if_addr, *if_addr);
3996 ip4_addr_copy(socket_ipv4_multicast_memberships[i].multi_addr, *multi_addr);
3997 done_socket(sock);
3998 return 1;
3999 }
4000 }
4001 done_socket(sock);
4002 return 0;
4003 }
4004
4005 /** Unregister a previously registered membership. This prevents dropping the membership
4006 * on socket close.
4007 *
4008 * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK).
4009 */
4010 static void
4011 lwip_socket_unregister_membership(int s, const ip4_addr_t *if_addr, const ip4_addr_t *multi_addr)
4012 {
4013 struct lwip_sock *sock = get_socket(s);
4014 int i;
4015
4016 if (!sock) {
4017 return;
4018 }
4019
4020 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4021 if ((socket_ipv4_multicast_memberships[i].sock == sock) &&
4022 ip4_addr_cmp(&socket_ipv4_multicast_memberships[i].if_addr, if_addr) &&
4023 ip4_addr_cmp(&socket_ipv4_multicast_memberships[i].multi_addr, multi_addr)) {
4024 socket_ipv4_multicast_memberships[i].sock = NULL;
4025 ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].if_addr);
4026 ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].multi_addr);
4027 break;
4028 }
4029 }
4030 done_socket(sock);
4031 }
4032
4033 /** Drop all memberships of a socket that were not dropped explicitly via setsockopt.
4034 *
4035 * ATTENTION: this function is NOT called from tcpip_thread (or under CORE_LOCK).
4036 */
4037 static void
4038 lwip_socket_drop_registered_memberships(int s)
4039 {
4040 struct lwip_sock *sock = get_socket(s);
4041 int i;
4042
4043 if (!sock) {
4044 return;
4045 }
4046
4047 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4048 if (socket_ipv4_multicast_memberships[i].sock == sock) {
4049 ip_addr_t multi_addr, if_addr;
4050 ip_addr_copy_from_ip4(multi_addr, socket_ipv4_multicast_memberships[i].multi_addr);
4051 ip_addr_copy_from_ip4(if_addr, socket_ipv4_multicast_memberships[i].if_addr);
4052 socket_ipv4_multicast_memberships[i].sock = NULL;
4053 ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].if_addr);
4054 ip4_addr_set_zero(&socket_ipv4_multicast_memberships[i].multi_addr);
4055
4056 netconn_join_leave_group(sock->conn, &multi_addr, &if_addr, NETCONN_LEAVE);
4057 }
4058 }
4059 done_socket(sock);
4060 }
4061 #endif /* LWIP_IGMP */
4062
4063 #if LWIP_IPV6_MLD
4064 /** Register a new MLD6 membership. On socket close, the membership is dropped automatically.
4065 *
4066 * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK).
4067 *
4068 * @return 1 on success, 0 on failure
4069 */
4070 static int
4071 lwip_socket_register_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr)
4072 {
4073 struct lwip_sock *sock = get_socket(s);
4074 int i;
4075
4076 if (!sock) {
4077 return 0;
4078 }
4079
4080 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4081 if (socket_ipv6_multicast_memberships[i].sock == NULL) {
4082 socket_ipv6_multicast_memberships[i].sock = sock;
4083 socket_ipv6_multicast_memberships[i].if_idx = (u8_t)if_idx;
4084 ip6_addr_copy(socket_ipv6_multicast_memberships[i].multi_addr, *multi_addr);
4085 done_socket(sock);
4086 return 1;
4087 }
4088 }
4089 done_socket(sock);
4090 return 0;
4091 }
4092
4093 /** Unregister a previously registered MLD6 membership. This prevents dropping the membership
4094 * on socket close.
4095 *
4096 * ATTENTION: this function is called from tcpip_thread (or under CORE_LOCK).
4097 */
4098 static void
4099 lwip_socket_unregister_mld6_membership(int s, unsigned int if_idx, const ip6_addr_t *multi_addr)
4100 {
4101 struct lwip_sock *sock = get_socket(s);
4102 int i;
4103
4104 if (!sock) {
4105 return;
4106 }
4107
4108 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4109 if ((socket_ipv6_multicast_memberships[i].sock == sock) &&
4110 (socket_ipv6_multicast_memberships[i].if_idx == if_idx) &&
4111 ip6_addr_cmp(&socket_ipv6_multicast_memberships[i].multi_addr, multi_addr)) {
4112 socket_ipv6_multicast_memberships[i].sock = NULL;
4113 socket_ipv6_multicast_memberships[i].if_idx = NETIF_NO_INDEX;
4114 ip6_addr_set_zero(&socket_ipv6_multicast_memberships[i].multi_addr);
4115 break;
4116 }
4117 }
4118 done_socket(sock);
4119 }
4120
4121 /** Drop all MLD6 memberships of a socket that were not dropped explicitly via setsockopt.
4122 *
4123 * ATTENTION: this function is NOT called from tcpip_thread (or under CORE_LOCK).
4124 */
4125 static void
4126 lwip_socket_drop_registered_mld6_memberships(int s)
4127 {
4128 struct lwip_sock *sock = get_socket(s);
4129 int i;
4130
4131 if (!sock) {
4132 return;
4133 }
4134
4135 for (i = 0; i < LWIP_SOCKET_MAX_MEMBERSHIPS; i++) {
4136 if (socket_ipv6_multicast_memberships[i].sock == sock) {
4137 ip_addr_t multi_addr;
4138 u8_t if_idx;
4139
4140 ip_addr_copy_from_ip6(multi_addr, socket_ipv6_multicast_memberships[i].multi_addr);
4141 if_idx = socket_ipv6_multicast_memberships[i].if_idx;
4142
4143 socket_ipv6_multicast_memberships[i].sock = NULL;
4144 socket_ipv6_multicast_memberships[i].if_idx = NETIF_NO_INDEX;
4145 ip6_addr_set_zero(&socket_ipv6_multicast_memberships[i].multi_addr);
4146
4147 netconn_join_leave_group_netif(sock->conn, &multi_addr, if_idx, NETCONN_LEAVE);
4148 }
4149 }
4150 done_socket(sock);
4151 }
4152 #endif /* LWIP_IPV6_MLD */
4153
4154 #endif /* LWIP_SOCKET */
4155