/* apple/xnu (xnu-6153.81.5) — tests/net_tuntests.c */
1 /* -*- compile-command: "xcrun --sdk iphoneos.internal make net_tuntests" -*- */
2
#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#include <unistd.h>
#include <poll.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <uuid/uuid.h>
#include <arpa/inet.h>
#include <sys/sysctl.h>
#include <sys/kern_control.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/kern_control.h>
#include <sys/sys_domain.h>

#include <net/if.h>
#include <net/if_ipsec.h>
#include <net/if_utun.h>
#include <netinet/in.h>
#include <netinet/in_var.h>
#include <net/pfkeyv2.h>
#include <netinet6/ipsec.h>

#include <darwintest.h>
#include <darwintest_utils.h>

#include <skywalk/os_skywalk_private.h> // for SK_FEATURE_*
35
36 T_GLOBAL_META(T_META_NAMESPACE("xnu.net.tun"));
37
38 /* Disable all these test until <rdar://problem/49124468> is fixed */
39 T_GLOBAL_META(T_META_ENABLED(false));
40
41 #if 0
42 #undef T_QUIET
43 #define T_QUIET
44 #endif
45
#if 0
/*
 * Debug helper: dump a buffer as classic "offset: xx xx ..." hex rows,
 * 16 bytes per row, via T_LOG.  Compiled out by default; flip the #if
 * to re-enable when debugging pfkey message layout.
 */
static void
log_hexdump(const void *inp, size_t len)
{
	unsigned i, off = 0;
	char buf[9 + 16 * 3 + 1];   /* "xxxxxxxx:" + 16 * " xx" + NUL */
	for (i = 0; i < len; i++) {
		/* Start a new row every 16 bytes with the offset prefix. */
		if (i % 16 == 0) {
			off = (unsigned)snprintf(buf, sizeof(buf), "%08x:", i);
		}
		off += (unsigned)snprintf(buf + off, sizeof(buf) - off, " %02x", (((const uint8_t *)inp)[i]) & 0xff);
		if (i % 16 == 15) {
			T_LOG("%s", buf);
		}
	}
	/* Flush the final partial row when len is not a multiple of 16. */
	if (len % 16) {
		T_LOG("%s", buf);
	}
}
#else
/* No-op stand-in so call sites can stay in place while dumping is disabled. */
static void
log_hexdump(const void *inp, size_t len)
{
#pragma unused(inp, len)
}
#endif
72
/*
 * Report whether the netagent subsystem is switched on, by reading the
 * net.link.generic.system.enable_netagent sysctl (enabled iff value == 1).
 */
static bool
is_netagent_enabled(void)
{
	int value = 0;
	size_t value_len = sizeof(value);

	T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(sysctlbyname("net.link.generic.system.enable_netagent", &value, &value_len, NULL, 0), NULL);
	T_QUIET; T_ASSERT_EQ(value_len, sizeof(value), NULL);

	return value == 1;
}
82
/* Which tunnel flavor is under test; set by setup_ipsec_test()/setup_utun_test(). */
static bool g_is_ipsec_test;
static bool g_is_utun_test;
/* Per-flavor kernel-control socket option numbers; -1 until configured. */
static int g_OPT_ENABLE_NETIF = -1;
static int g_OPT_ENABLE_FLOWSWITCH = -1;
static int g_OPT_ENABLE_CHANNEL = -1;
static int g_OPT_GET_CHANNEL_UUID = -1;
static int g_OPT_IFNAME = -1;
/* Kernel control name for the flavor under test (IPSEC_/UTUN_CONTROL_NAME). */
static char *g_CONTROL_NAME = NULL;

/* Two socket-creation flavors (see comments at their definitions) and the
 * function pointer the tests call; setup_*_test() selects the flavor. */
static int create_tunsock_old(int enable_netif, int enable_flowswitch, int channel_count, uuid_t uuid[]);
static int create_tunsock_new(int enable_netif, int enable_flowswitch, int channel_count, uuid_t uuid[]);
static int (*create_tunsock)(int enable_netif, int enable_flowswitch, int channel_count, uuid_t uuid[]);
95
96 static void
97 setup_ipsec_test(void)
98 {
99 T_LOG("Configuring for ipsec tests");
100 g_OPT_ENABLE_NETIF = IPSEC_OPT_ENABLE_NETIF;
101 g_OPT_ENABLE_FLOWSWITCH = IPSEC_OPT_ENABLE_FLOWSWITCH;
102 g_OPT_ENABLE_CHANNEL = IPSEC_OPT_ENABLE_CHANNEL;
103 g_OPT_GET_CHANNEL_UUID = IPSEC_OPT_GET_CHANNEL_UUID;
104 g_OPT_IFNAME = IPSEC_OPT_IFNAME;
105 g_CONTROL_NAME = IPSEC_CONTROL_NAME;
106 create_tunsock = create_tunsock_new;
107 g_is_ipsec_test = true;
108 }
109
110 static void
111 setup_utun_test(void)
112 {
113 T_LOG("Configuring for utun tests");
114 g_OPT_ENABLE_NETIF = UTUN_OPT_ENABLE_NETIF;
115 g_OPT_ENABLE_FLOWSWITCH = UTUN_OPT_ENABLE_FLOWSWITCH;
116 g_OPT_ENABLE_CHANNEL = UTUN_OPT_ENABLE_CHANNEL;
117 g_OPT_GET_CHANNEL_UUID = UTUN_OPT_GET_CHANNEL_UUID;
118 g_OPT_IFNAME = UTUN_OPT_IFNAME;
119 g_CONTROL_NAME = UTUN_CONTROL_NAME;
120 create_tunsock = create_tunsock_old;
121 g_is_utun_test = true;
122 }
123
124 static bool
125 setblocking(int s, bool blocking)
126 {
127 int flags;
128 bool ret;
129
130 T_QUIET; T_EXPECT_POSIX_SUCCESS(flags = fcntl(s, F_GETFL, 0), NULL);
131
132 ret = !(flags & O_NONBLOCK);
133
134 if (blocking) {
135 flags &= ~O_NONBLOCK;
136 } else {
137 flags |= O_NONBLOCK;
138 }
139
140 #if 0
141 T_LOG("Setting fd %d from %s to %s\n",
142 s, ret ? "blocking" : "nonblocking",
143 blocking ? "blocking" : "nonblocking");
144 #endif
145
146 T_QUIET; T_EXPECT_POSIX_SUCCESS(flags = fcntl(s, F_SETFL, flags), NULL);
147
148 return ret;
149 }
150
151
/*
 * Read back ENABLE_NETIF / ENABLE_FLOWSWITCH / ENABLE_CHANNEL from the
 * control socket and verify they match what the socket was created with.
 * GET_CHANNEL_UUID must return valid UUIDs when channels are enabled, and
 * ENXIO (leaving the UUIDs null) when they are not.
 *
 * tunsock            kernel-control socket to interrogate
 * enable_netif       expected netif enable state
 * enable_flowswitch  expected flowswitch enable state
 * channel_count      number of channels requested at creation
 * uuid               out array of channel_count UUIDs; may be NULL, in
 *                    which case a stack scratch array is used
 */
static void
check_enables(int tunsock, int enable_netif, int enable_flowswitch, int channel_count, uuid_t uuid[])
{
	int scratch;
	socklen_t scratchlen, uuidlen;
	/* Fix: a zero-length VLA is undefined behavior (C11 6.7.6.2p5), and
	 * callers do pass channel_count == 0 — size the scratch array to at
	 * least one element. */
	uuid_t scratchuuid[channel_count > 0 ? channel_count : 1];
	if (!uuid) {
		uuid = scratchuuid;
	}

	//T_LOG("checking tunsock %d", tunsock);

	if (g_is_ipsec_test && channel_count && !enable_netif) {
		/* Unfortunately, the connect incorrectly unwinds the bind if it get an error.
		 * until that is fixed, expect EINVAL here
		 */
		scratchlen = sizeof(scratch);
		T_QUIET; T_EXPECT_POSIX_FAILURE(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_NETIF,
		    &scratch, &scratchlen), EINVAL, NULL);
		T_QUIET; T_EXPECT_POSIX_FAILURE(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_FLOWSWITCH,
		    &scratch, &scratchlen), EINVAL, NULL);
		T_QUIET; T_EXPECT_POSIX_FAILURE(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_CHANNEL,
		    &scratch, &scratchlen), EINVAL, NULL);
		for (int i = 0; i < channel_count; i++) {
			uuid_clear(uuid[i]);
		}
		uuidlen = sizeof(uuid_t) * (unsigned int)channel_count;
		T_QUIET; T_EXPECT_POSIX_FAILURE(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_GET_CHANNEL_UUID,
		    uuid, &uuidlen), EINVAL, NULL);
		for (int i = 0; i < channel_count; i++) {
			T_QUIET; T_EXPECT_TRUE(uuid_is_null(uuid[i]), NULL);
		}
		return;
	}

	/* Netif enable state must read back exactly as requested. */
	scratchlen = sizeof(scratch);
	T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_NETIF,
	    &scratch, &scratchlen), NULL);
	T_QUIET; T_EXPECT_EQ_ULONG((unsigned long)scratchlen, sizeof(scratch), NULL);
	T_QUIET; T_EXPECT_EQ(scratch, enable_netif, NULL);

	/* Flowswitch only sticks when netagent is on AND a netif exists. */
	scratchlen = sizeof(scratch);
	T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_FLOWSWITCH,
	    &scratch, &scratchlen), NULL);
	T_QUIET; T_EXPECT_EQ_ULONG((unsigned long)scratchlen, sizeof(scratch), NULL);
	if (is_netagent_enabled()) {
		if (enable_netif) {
			T_QUIET; T_EXPECT_EQ(scratch, enable_flowswitch, NULL);
		} else {
			T_QUIET; T_EXPECT_EQ(scratch, 0, NULL);
		}
	} else {
		T_QUIET; T_EXPECT_EQ(scratch, 0, NULL);
	}

	/* ipsec refuses channels without a netif; otherwise the channel
	 * count must read back as requested. */
	scratchlen = sizeof(scratch);
	T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_CHANNEL,
	    &scratch, &scratchlen), NULL);
	T_QUIET; T_EXPECT_EQ_ULONG((unsigned long)scratchlen, sizeof(scratch), NULL);
	if (g_is_ipsec_test && !enable_netif) {
		T_QUIET; T_EXPECT_EQ(scratch, 0, NULL);
	} else {
		T_QUIET; T_EXPECT_EQ(scratch, (int)channel_count, NULL);
	}

	if (scratch) {
		/* Channels enabled: every UUID must come back non-null. */
		for (int i = 0; i < channel_count; i++) {
			uuid_clear(uuid[i]);
		}
		uuidlen = sizeof(uuid_t) * (unsigned int)channel_count;
		T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_GET_CHANNEL_UUID,
		    uuid, &uuidlen), NULL);
		T_QUIET; T_EXPECT_EQ_ULONG((unsigned long)uuidlen, sizeof(uuid_t) * (unsigned long)channel_count, NULL);
		for (int i = 0; i < channel_count; i++) {
			T_QUIET; T_EXPECT_FALSE(uuid_is_null(uuid[i]), NULL);
		}
	} else {
		/* No channels: the UUID query must fail with ENXIO and leave
		 * the output untouched (still null). */
		for (int i = 0; i < channel_count; i++) {
			uuid_clear(uuid[i]);
		}
		uuidlen = sizeof(uuid_t) * (unsigned int)channel_count;
		T_QUIET; T_EXPECT_POSIX_FAILURE(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_GET_CHANNEL_UUID,
		    uuid, &uuidlen), ENXIO, NULL);
		T_QUIET; T_EXPECT_EQ_ULONG((unsigned long)uuidlen, sizeof(uuid_t) * (unsigned long)channel_count, NULL);
		for (int i = 0; i < channel_count; i++) {
			T_QUIET; T_EXPECT_TRUE(uuid_is_null(uuid[i]), NULL);
		}
	}
}
242
243 static void
244 tunsock_get_ifname(int s, char ifname[IFXNAMSIZ])
245 {
246 socklen_t optlen = IFXNAMSIZ;
247 T_QUIET; T_WITH_ERRNO; T_ASSERT_POSIX_ZERO(getsockopt(s, SYSPROTO_CONTROL, g_OPT_IFNAME, ifname, &optlen), NULL);
248 T_QUIET; T_ASSERT_TRUE(optlen > 0, NULL);
249 T_QUIET; T_ASSERT_TRUE(ifname[optlen - 1] == '\0', NULL);
250 T_QUIET; T_ASSERT_TRUE(strlen(ifname) + 1 == optlen, "got ifname \"%s\" len %zd expected %u", ifname, strlen(ifname), optlen);
251 }
252
253 static short
254 ifnet_get_flags(int s, const char ifname[IFNAMSIZ])
255 {
256 struct ifreq ifr;
257 memset(&ifr, 0, sizeof(ifr));
258 strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
259 T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(ioctl(s, SIOCGIFFLAGS, (caddr_t)&ifr), NULL);
260 return ifr.ifr_flags;
261 }
262
263 static void
264 ifnet_add_addr4(const char ifname[IFNAMSIZ], struct in_addr *addr, struct in_addr *mask, struct in_addr *broadaddr)
265 {
266 struct sockaddr_in *sin;
267 struct in_aliasreq ifra;
268 int s;
269
270 T_QUIET; T_EXPECT_POSIX_SUCCESS(s = socket(AF_INET, SOCK_DGRAM, 0), NULL);
271
272 memset(&ifra, 0, sizeof(ifra));
273 strlcpy(ifra.ifra_name, ifname, sizeof(ifra.ifra_name));
274
275 if (addr != NULL) {
276 sin = &ifra.ifra_addr;
277 sin->sin_len = sizeof(*sin);
278 sin->sin_family = AF_INET;
279 sin->sin_addr = *addr;
280 }
281
282 if (mask != NULL) {
283 sin = &ifra.ifra_mask;
284 sin->sin_len = sizeof(*sin);
285 sin->sin_family = AF_INET;
286 sin->sin_addr = *mask;
287 }
288
289 if (broadaddr != NULL || (addr != NULL &&
290 (ifnet_get_flags(s, ifname) & IFF_POINTOPOINT) != 0)) {
291 sin = &ifra.ifra_broadaddr;
292 sin->sin_len = sizeof(*sin);
293 sin->sin_family = AF_INET;
294 sin->sin_addr = (broadaddr != NULL) ? *broadaddr : *addr;
295 }
296
297 T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(ioctl(s, SIOCAIFADDR, &ifra), NULL);
298
299 T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(close(s), NULL);
300 }
301
/* Lazily-opened PF_KEY socket (see create_sa) and the interface address
 * pair assigned in setup_tunsock(). */
static int g_pfkeyso = -1;
static struct in_addr g_addr1, g_addr2;
304
/*
 * Build and send one PF_KEY V2 message (SADB_ADD etc., per `type`)
 * installing an ESP SA with SPI `spi` from `src` to `dst`, bound to the
 * ipsec interface `ifname`.  Uses NULL encryption and no authentication,
 * so traffic flows but is not actually protected — test use only.
 * Opens the global PF_KEY socket on first use.
 */
static void
create_sa(const char ifname[IFXNAMSIZ], uint8_t type, uint32_t spi, struct in_addr *src, struct in_addr *dst)
{
	if (g_pfkeyso == -1) {
		T_QUIET; T_EXPECT_POSIX_SUCCESS(g_pfkeyso = socket(PF_KEY, SOCK_RAW, PF_KEY_V2), NULL);
	}

	/*
	 * <base, SA, (lifetime(HS),) address(SD), (address(P),)
	 * key(AE), (identity(SD),) (sensitivity)>
	 */

	/* PF_KEY extension headers must be 64-bit aligned on the wire,
	 * hence the aligned attributes on every member. */
	struct {
		struct sadb_msg msg __attribute((aligned(sizeof(uint64_t))));
		struct sadb_key key __attribute((aligned(sizeof(uint64_t))));
		struct sadb_sa sa __attribute((aligned(sizeof(uint64_t))));
		struct sadb_x_sa2 sa2 __attribute((aligned(sizeof(uint64_t))));
		struct sadb_x_ipsecif ipsecif __attribute((aligned(sizeof(uint64_t))));
		struct {
			struct sadb_address addr __attribute((aligned(sizeof(uint64_t))));
			struct sockaddr_in saddr __attribute((aligned(sizeof(uint64_t))));
		} src;
		struct {
			struct sadb_address addr __attribute((aligned(sizeof(uint64_t))));
			struct sockaddr_in saddr __attribute((aligned(sizeof(uint64_t))));
		} dst;
	} addcmd;

	memset(&addcmd, 0, sizeof(addcmd));

	/* Base header; all PF_KEY lengths are in 64-bit units (PFKEY_UNIT64). */
	addcmd.msg.sadb_msg_version = PF_KEY_V2;
	addcmd.msg.sadb_msg_type = type;
	addcmd.msg.sadb_msg_errno = 0;
	addcmd.msg.sadb_msg_satype = SADB_SATYPE_ESP;
	addcmd.msg.sadb_msg_len = PFKEY_UNIT64(sizeof(addcmd));
	addcmd.msg.sadb_msg_reserved = 0;
	addcmd.msg.sadb_msg_seq = 0;
	addcmd.msg.sadb_msg_pid = (unsigned)getpid();

	/* Zero-bit encryption key: legal with SADB_EALG_NULL below. */
	addcmd.key.sadb_key_len = PFKEY_UNIT64(sizeof(addcmd.key));
	addcmd.key.sadb_key_exttype = SADB_EXT_KEY_ENCRYPT;
	addcmd.key.sadb_key_bits = 0;
	addcmd.key.sadb_key_reserved = 0;

	addcmd.sa.sadb_sa_len = PFKEY_UNIT64(sizeof(addcmd.sa));
	addcmd.sa.sadb_sa_exttype = SADB_EXT_SA;
	addcmd.sa.sadb_sa_spi = htonl(spi);
	addcmd.sa.sadb_sa_replay = 0;
	addcmd.sa.sadb_sa_state = 0;
	addcmd.sa.sadb_sa_auth = SADB_AALG_NONE;
	addcmd.sa.sadb_sa_encrypt = SADB_EALG_NULL;
	addcmd.sa.sadb_sa_flags = SADB_X_EXT_CYCSEQ;

	/* SA2: auto-expire and tear down when the interface detaches. */
	addcmd.sa2.sadb_x_sa2_len = PFKEY_UNIT64(sizeof(addcmd.sa2));
	addcmd.sa2.sadb_x_sa2_exttype = SADB_X_EXT_SA2;
	addcmd.sa2.sadb_x_sa2_mode = IPSEC_MODE_ANY;
	addcmd.sa2.sadb_x_sa2_alwaysexpire = 1;
	addcmd.sa2.sadb_x_sa2_flags = SADB_X_EXT_SA2_DELETE_ON_DETACH;
	addcmd.sa2.sadb_x_sa2_sequence = 0;
	addcmd.sa2.sadb_x_sa2_reqid = 0;

	/* Bind the SA to the test's ipsec interface by name. */
	addcmd.ipsecif.sadb_x_ipsecif_len = PFKEY_UNIT64(sizeof(addcmd.ipsecif));
	addcmd.ipsecif.sadb_x_ipsecif_exttype = SADB_X_EXT_IPSECIF;
	memset(addcmd.ipsecif.sadb_x_ipsecif_internal_if, 0, sizeof(addcmd.ipsecif.sadb_x_ipsecif_internal_if));
	memset(addcmd.ipsecif.sadb_x_ipsecif_outgoing_if, 0, sizeof(addcmd.ipsecif.sadb_x_ipsecif_outgoing_if));
	strlcpy(addcmd.ipsecif.sadb_x_ipsecif_ipsec_if, ifname, sizeof(addcmd.ipsecif.sadb_x_ipsecif_ipsec_if));
	addcmd.ipsecif.sadb_x_ipsecif_init_disabled = 0;
	addcmd.ipsecif.reserved = 0;

	addcmd.src.addr.sadb_address_len = PFKEY_UNIT64(sizeof(addcmd.src));
	addcmd.src.addr.sadb_address_exttype = SADB_EXT_ADDRESS_SRC;
	addcmd.src.addr.sadb_address_proto = IPSEC_ULPROTO_ANY;
	addcmd.src.addr.sadb_address_prefixlen = sizeof(struct in_addr) << 3; //XXX Why?
	addcmd.src.addr.sadb_address_reserved = 0;
	addcmd.src.saddr.sin_len = sizeof(addcmd.src.saddr);
	addcmd.src.saddr.sin_family = AF_INET;
	addcmd.src.saddr.sin_port = htons(0);
	addcmd.src.saddr.sin_addr = *src;

	addcmd.dst.addr.sadb_address_len = PFKEY_UNIT64(sizeof(addcmd.dst));
	addcmd.dst.addr.sadb_address_exttype = SADB_EXT_ADDRESS_DST;
	addcmd.dst.addr.sadb_address_proto = IPSEC_ULPROTO_ANY;
	addcmd.dst.addr.sadb_address_prefixlen = sizeof(struct in_addr) << 3; //XXX Why?
	addcmd.dst.addr.sadb_address_reserved = 0;
	addcmd.dst.saddr.sin_len = sizeof(addcmd.dst.saddr);
	addcmd.dst.saddr.sin_family = AF_INET;
	addcmd.dst.saddr.sin_port = htons(0);
	addcmd.dst.saddr.sin_addr = *dst;

	log_hexdump(&addcmd, sizeof(addcmd));

	/* The whole message must go out in a single send. */
	ssize_t slen;
	T_QUIET; T_EXPECT_POSIX_SUCCESS(slen = send(g_pfkeyso, &addcmd, sizeof(addcmd), 0), NULL);
	T_QUIET; T_EXPECT_EQ(slen, (ssize_t)sizeof(addcmd), NULL);
}
400
/* This version of the test expects channels to be enabled after connect.
 * Once the utun driver is converted, switch to create_tunsock_new
 *
 * Creates a kernel-control socket for the flavor selected by
 * setup_*_test(), verifying at each stage (pre-bind, post-bind,
 * post-connect) that the enable options succeed or fail with the
 * expected errno.  Returns the connected socket; fills uuid[] with the
 * channel UUIDs when channels were requested (uuid may be NULL).
 */
static int
create_tunsock_old(int enable_netif, int enable_flowswitch, int channel_count, uuid_t uuid[])
{
	int tunsock;
	struct ctl_info kernctl_info;
	struct sockaddr_ctl kernctl_addr;
	/* Fix: a zero-length VLA is undefined behavior (C11 6.7.6.2p5), and
	 * permute_enables() passes channel_count == 0 — size the scratch
	 * array to at least one element. */
	uuid_t scratchuuid[channel_count > 0 ? channel_count : 1];
	if (!uuid) {
		uuid = scratchuuid;
	}
	socklen_t uuidlen;

startover:

	T_QUIET; T_EXPECT_POSIX_SUCCESS(tunsock = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL), NULL);

	/* Resolve the control name to an id and build the control address. */
	memset(&kernctl_info, 0, sizeof(kernctl_info));
	strlcpy(kernctl_info.ctl_name, g_CONTROL_NAME, sizeof(kernctl_info.ctl_name));
	T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(ioctl(tunsock, CTLIOCGINFO, &kernctl_info), NULL);

	memset(&kernctl_addr, 0, sizeof(kernctl_addr));
	kernctl_addr.sc_len = sizeof(kernctl_addr);
	kernctl_addr.sc_family = AF_SYSTEM;
	kernctl_addr.ss_sysaddr = AF_SYS_CONTROL;
	kernctl_addr.sc_id = kernctl_info.ctl_id;
	kernctl_addr.sc_unit = 0;

	T_LOG("%s: enable_netif = %d, enable_flowswitch = %d, channel_count = %d",
	    __func__, enable_netif, enable_flowswitch, channel_count);

	/* Before bind, every enable option must be rejected with EINVAL. */
	T_QUIET; T_EXPECT_POSIX_FAILURE(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_NETIF,
	    &enable_netif, sizeof(enable_netif)), EINVAL, NULL);
	T_QUIET; T_EXPECT_POSIX_FAILURE(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_FLOWSWITCH,
	    &enable_flowswitch, sizeof(enable_flowswitch)), EINVAL, NULL);
	T_QUIET; T_EXPECT_POSIX_FAILURE(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_CHANNEL,
	    &channel_count, sizeof(channel_count)), EINVAL, NULL);
	for (int i = 0; i < channel_count; i++) {
		uuid_clear(uuid[i]);
	}
	uuidlen = sizeof(uuid_t) * (unsigned int)channel_count;
	T_QUIET; T_EXPECT_POSIX_FAILURE(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_GET_CHANNEL_UUID,
	    uuid, &uuidlen), EINVAL, NULL);
	T_QUIET; T_EXPECT_EQ_ULONG((unsigned long)uuidlen, sizeof(uuid_t) * (unsigned long)channel_count, NULL);
	for (int i = 0; i < channel_count; i++) {
		T_QUIET; T_EXPECT_TRUE(uuid_is_null(uuid[i]), NULL);
	}

	T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(bind(tunsock, (struct sockaddr *)&kernctl_addr, sizeof(kernctl_addr)), NULL);

	/* After bind: only netif enable works; flowswitch/channel still EINVAL. */
	T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_NETIF,
	    &enable_netif, sizeof(enable_netif)), NULL);
	T_QUIET; T_EXPECT_POSIX_FAILURE(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_FLOWSWITCH,
	    &enable_flowswitch, sizeof(enable_flowswitch)), EINVAL, NULL);
	T_QUIET; T_EXPECT_POSIX_FAILURE(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_CHANNEL,
	    &channel_count, sizeof(channel_count)), EINVAL, NULL);
	for (int i = 0; i < channel_count; i++) {
		uuid_clear(uuid[i]);
	}
	uuidlen = sizeof(uuid_t) * (unsigned int)channel_count;
	T_QUIET; T_EXPECT_POSIX_FAILURE(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_GET_CHANNEL_UUID,
	    uuid, &uuidlen), ENXIO, NULL);
	T_QUIET; T_EXPECT_EQ_ULONG((unsigned long)uuidlen, sizeof(uuid_t) * (unsigned long)channel_count, NULL);
	for (int i = 0; i < channel_count; i++) {
		T_QUIET; T_EXPECT_TRUE(uuid_is_null(uuid[i]), NULL);
	}

	int error = connect(tunsock, (struct sockaddr *)&kernctl_addr, sizeof(kernctl_addr));
	if (error == -1 && errno == EBUSY) {
		/* XXX remove this retry nonsense when this is fixed:
		 * <rdar://problem/37340313> creating an interface without specifying specific interface name should not return EBUSY
		 */
		close(tunsock);
		T_LOG("connect got EBUSY, sleeping 1 second before retry");
		sleep(1);
		goto startover;
	}
	T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(error, "connect()");

	/* After connect: netif enable is locked in (EINVAL on re-set). */
	T_QUIET; T_EXPECT_POSIX_FAILURE(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_NETIF,
	    &enable_netif, sizeof(enable_netif)), EINVAL, NULL);

	/* Flowswitch enable requires both netagent and a netif. */
	if (is_netagent_enabled()) {
		if (enable_netif) {
			T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_FLOWSWITCH,
			    &enable_flowswitch, sizeof(enable_flowswitch)), NULL);
		} else {
			T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_FAILURE(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_FLOWSWITCH,
			    &enable_flowswitch, sizeof(enable_flowswitch)), ENOENT, NULL);
		}
	} else {
		T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_FAILURE(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_FLOWSWITCH,
		    &enable_flowswitch, sizeof(enable_flowswitch)), ENOTSUP, NULL);
	}

	if (channel_count) {
		if (g_is_ipsec_test && !enable_netif) {
			/* ipsec doesn't support channels without a netif */
			T_QUIET; T_EXPECT_POSIX_FAILURE(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_CHANNEL,
			    &channel_count, sizeof(channel_count)), EOPNOTSUPP, NULL);
			for (int i = 0; i < channel_count; i++) {
				uuid_clear(uuid[i]);
			}
			uuidlen = sizeof(uuid_t) * (unsigned int)channel_count;
			T_QUIET; T_EXPECT_POSIX_FAILURE(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_GET_CHANNEL_UUID,
			    uuid, &uuidlen), ENXIO, NULL);
			T_QUIET; T_EXPECT_EQ_ULONG((unsigned long)uuidlen, sizeof(uuid_t) * (unsigned long)channel_count, NULL);
			for (int i = 0; i < channel_count; i++) {
				T_QUIET; T_EXPECT_TRUE(uuid_is_null(uuid[i]), NULL);
			}
		} else {
			/* Channels come up post-connect in this flavor. */
			T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_CHANNEL,
			    &channel_count, sizeof(channel_count)), NULL);
			for (int i = 0; i < channel_count; i++) {
				uuid_clear(uuid[i]);
			}
			uuidlen = sizeof(uuid_t) * (unsigned int)channel_count;
			T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_GET_CHANNEL_UUID,
			    uuid, &uuidlen), NULL);
			T_QUIET; T_EXPECT_EQ_ULONG((unsigned long)uuidlen, sizeof(uuid_t) * (unsigned long)channel_count, NULL);
			for (int i = 0; i < channel_count; i++) {
				T_QUIET; T_EXPECT_FALSE(uuid_is_null(uuid[i]), NULL);
			}
		}
	} else {
		T_QUIET; T_EXPECT_POSIX_FAILURE(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_CHANNEL,
		    &channel_count, sizeof(channel_count)), ENXIO, NULL);
		for (int i = 0; i < channel_count; i++) {
			uuid_clear(uuid[i]);
		}
		uuidlen = sizeof(uuid_t) * (unsigned int)channel_count;
		T_QUIET; T_EXPECT_POSIX_FAILURE(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_GET_CHANNEL_UUID,
		    uuid, &uuidlen), ENXIO, NULL);
		T_QUIET; T_EXPECT_EQ_ULONG((unsigned long)uuidlen, sizeof(uuid_t) * (unsigned long)channel_count, NULL);
		for (int i = 0; i < channel_count; i++) {
			T_QUIET; T_EXPECT_TRUE(uuid_is_null(uuid[i]), NULL);
		}
	}

	check_enables(tunsock, enable_netif, enable_flowswitch, channel_count, uuid);

	//T_LOG("Returning tunsock %d", tunsock);

	return tunsock;
}
548
/* This version of the test expects channels to be enabled before connect
 * Once the utun driver is converted, rename this to just create_tunsock
 *
 * Same staged option checks as create_tunsock_old, but ENABLE_CHANNEL is
 * issued between bind and connect, and connect itself is expected to fail
 * with ENOTSUP for the unsupported ipsec channels-without-netif combo.
 * Returns the socket; fills uuid[] with channel UUIDs when applicable.
 */
static int
create_tunsock_new(int enable_netif, int enable_flowswitch, int channel_count, uuid_t uuid[])
{
	int tunsock;
	struct ctl_info kernctl_info;
	struct sockaddr_ctl kernctl_addr;
	/* Fix: a zero-length VLA is undefined behavior (C11 6.7.6.2p5), and
	 * permute_enables() passes channel_count == 0 — size the scratch
	 * array to at least one element. */
	uuid_t scratchuuid[channel_count > 0 ? channel_count : 1];
	if (!uuid) {
		uuid = scratchuuid;
	}
	socklen_t uuidlen;

startover:

	T_QUIET; T_EXPECT_POSIX_SUCCESS(tunsock = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL), NULL);

	/* Resolve the control name to an id and build the control address. */
	memset(&kernctl_info, 0, sizeof(kernctl_info));
	strlcpy(kernctl_info.ctl_name, g_CONTROL_NAME, sizeof(kernctl_info.ctl_name));
	T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(ioctl(tunsock, CTLIOCGINFO, &kernctl_info), NULL);

	memset(&kernctl_addr, 0, sizeof(kernctl_addr));
	kernctl_addr.sc_len = sizeof(kernctl_addr);
	kernctl_addr.sc_family = AF_SYSTEM;
	kernctl_addr.ss_sysaddr = AF_SYS_CONTROL;
	kernctl_addr.sc_id = kernctl_info.ctl_id;
	kernctl_addr.sc_unit = 0;

	T_LOG("%s: enable_netif = %d, enable_flowswitch = %d, channel_count = %d",
	    __func__, enable_netif, enable_flowswitch, channel_count);

	/* Before bind, every enable option must be rejected with EINVAL. */
	T_QUIET; T_EXPECT_POSIX_FAILURE(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_NETIF,
	    &enable_netif, sizeof(enable_netif)), EINVAL, NULL);
	T_QUIET; T_EXPECT_POSIX_FAILURE(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_FLOWSWITCH,
	    &enable_flowswitch, sizeof(enable_flowswitch)), EINVAL, NULL);
	T_QUIET; T_EXPECT_POSIX_FAILURE(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_CHANNEL,
	    &channel_count, sizeof(channel_count)), EINVAL, NULL);
	for (int i = 0; i < channel_count; i++) {
		uuid_clear(uuid[i]);
	}
	uuidlen = sizeof(uuid_t) * (unsigned int)channel_count;
	T_QUIET; T_EXPECT_POSIX_FAILURE(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_GET_CHANNEL_UUID,
	    uuid, &uuidlen), EINVAL, NULL);
	T_QUIET; T_EXPECT_EQ_ULONG((unsigned long)uuidlen, sizeof(uuid_t) * (unsigned long)channel_count, NULL);
	for (int i = 0; i < channel_count; i++) {
		T_QUIET; T_EXPECT_TRUE(uuid_is_null(uuid[i]), NULL);
	}

	T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(bind(tunsock, (struct sockaddr *)&kernctl_addr, sizeof(kernctl_addr)), NULL);

	/* After bind: netif and channel enables work; flowswitch still EINVAL. */
	T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_NETIF,
	    &enable_netif, sizeof(enable_netif)), NULL);
	T_QUIET; T_EXPECT_POSIX_FAILURE(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_FLOWSWITCH,
	    &enable_flowswitch, sizeof(enable_flowswitch)), EINVAL, NULL);
	T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_CHANNEL,
	    &channel_count, sizeof(channel_count)), NULL);

	/* UUIDs are not available until the interface is connected. */
	for (int i = 0; i < channel_count; i++) {
		uuid_clear(uuid[i]);
	}
	uuidlen = sizeof(uuid_t) * (unsigned int)channel_count;
	T_QUIET; T_EXPECT_POSIX_FAILURE(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_GET_CHANNEL_UUID,
	    uuid, &uuidlen), ENXIO, NULL);
	T_QUIET; T_EXPECT_EQ_ULONG((unsigned long)uuidlen, sizeof(uuid_t) * (unsigned long)channel_count, NULL);
	for (int i = 0; i < channel_count; i++) {
		T_QUIET; T_EXPECT_TRUE(uuid_is_null(uuid[i]), NULL);
	}

	int error = connect(tunsock, (struct sockaddr *)&kernctl_addr, sizeof(kernctl_addr));
	if (error == -1 && errno == EBUSY) {
		/* XXX remove this retry nonsense when this is fixed:
		 * <rdar://problem/37340313> creating an interface without specifying specific interface name should not return EBUSY
		 */
		close(tunsock);
		T_LOG("connect got EBUSY, sleeping 1 second before retry");
		sleep(1);
		goto startover;
	}
	if (g_is_ipsec_test && channel_count && !enable_netif) {
		/* ipsec doesn't support channels without a netif */
		T_QUIET; T_EXPECT_POSIX_FAILURE(error, ENOTSUP, "connect() == -1 && errno == ENOTSUP");
	} else {
		T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(error, "connect() == 0");
	}

	/* Netif enable is locked in after connect (EINVAL on re-set). */
	T_QUIET; T_EXPECT_POSIX_FAILURE(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_NETIF,
	    &enable_netif, sizeof(enable_netif)), EINVAL, NULL);

	if (g_is_ipsec_test && channel_count && !enable_netif) {
		/* Connect failed above, so we get EINVAL */
		T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_FAILURE(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_FLOWSWITCH,
		    &enable_flowswitch, sizeof(enable_flowswitch)), EINVAL, NULL);
	} else {
		/* Flowswitch enable requires both netagent and a netif. */
		if (is_netagent_enabled()) {
			if (enable_netif) {
				T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_FLOWSWITCH,
				    &enable_flowswitch, sizeof(enable_flowswitch)), NULL);
			} else {
				T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_FAILURE(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_FLOWSWITCH,
				    &enable_flowswitch, sizeof(enable_flowswitch)), ENOENT, NULL);
			}
		} else {
			T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_FAILURE(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_FLOWSWITCH,
			    &enable_flowswitch, sizeof(enable_flowswitch)), ENOTSUP, NULL);
		}
	}

	T_QUIET; T_EXPECT_POSIX_FAILURE(setsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_ENABLE_CHANNEL,
	    &channel_count, sizeof(channel_count)), EINVAL, NULL);

	for (int i = 0; i < channel_count; i++) {
		uuid_clear(uuid[i]);
	}
	uuidlen = sizeof(uuid_t) * (unsigned int)channel_count;
	if (!channel_count || (g_is_ipsec_test && channel_count && !enable_netif)) {
		/* ipsec doesn't support channels without a netif */
		if (g_is_ipsec_test && channel_count && !enable_netif) {
			/* Unfortunately, the connect incorrectly unwinds the bind if it get an error.
			 * until that is fixed, expect EINVAL here
			 */
			T_QUIET; T_EXPECT_POSIX_FAILURE(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_GET_CHANNEL_UUID,
			    uuid, &uuidlen), EINVAL, NULL);
		} else {
			T_QUIET; T_EXPECT_POSIX_FAILURE(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_GET_CHANNEL_UUID,
			    uuid, &uuidlen), ENXIO, NULL);
		}
		T_QUIET; T_EXPECT_EQ_ULONG((unsigned long)uuidlen, sizeof(uuid_t) * (unsigned long)channel_count, NULL);
		for (int i = 0; i < channel_count; i++) {
			T_QUIET; T_EXPECT_TRUE(uuid_is_null(uuid[i]), NULL);
		}
	} else {
		uuidlen = sizeof(uuid_t) * (unsigned int)channel_count;
		T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(getsockopt(tunsock, SYSPROTO_CONTROL, g_OPT_GET_CHANNEL_UUID,
		    uuid, &uuidlen), NULL);
		T_QUIET; T_EXPECT_EQ_ULONG((unsigned long)uuidlen, sizeof(uuid_t) * (unsigned long)channel_count, NULL);
		for (int i = 0; i < channel_count; i++) {
			T_QUIET; T_EXPECT_FALSE(uuid_is_null(uuid[i]), NULL);
		}
	}

	check_enables(tunsock, enable_netif, enable_flowswitch, channel_count, uuid);

	//T_LOG("Returning tunsock %d", tunsock);

	return tunsock;
}
697
698 static int (*create_tunsock)(int enable_netif, int enable_flowswitch, int channel_count, uuid_t uuid[]) = create_tunsock_new;
699
#if 0
/* Stale, disabled debug helper: as written it would not compile — `len`,
 * `name`, and `interesting_row` are undeclared, and `IpFDATA_GENERAL`
 * looks like a typo for IFDATA_GENERAL.  Kept for reference only. */
static void
ipsec_stats(void)
{
	struct ifmibdata ifmd;

	len = sizeof(struct ifmibdata);
	name[3] = IFMIB_IFDATA;
	name[4] = interesting_row;
	name[5] = IpFDATA_GENERAL;
	if (sysctl(name, 6, &ifmd, &len, (void *)0, 0) == -1) {
		err(1, "sysctl IFDATA_GENERAL %d", interesting_row);
	}
}
#endif
715
716 static void
717 permute_enables(void)
718 {
719 int tunsock;
720 T_EXPECT_GE(tunsock = create_tunsock(false, false, false, NULL), 0, NULL);
721 T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(close(tunsock), NULL);
722 T_EXPECT_GE(tunsock = create_tunsock(false, false, true, NULL), 0, NULL);
723 T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(close(tunsock), NULL);
724 T_EXPECT_GE(tunsock = create_tunsock(false, true, false, NULL), 0, NULL);
725 T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(close(tunsock), NULL);
726 T_EXPECT_GE(tunsock = create_tunsock(false, true, true, NULL), 0, NULL);
727 T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(close(tunsock), NULL);
728 T_EXPECT_GE(tunsock = create_tunsock(true, false, false, NULL), 0, NULL);
729 T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(close(tunsock), NULL);
730 T_EXPECT_GE(tunsock = create_tunsock(true, false, true, NULL), 0, NULL);
731 T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(close(tunsock), NULL);
732 T_EXPECT_GE(tunsock = create_tunsock(true, true, false, NULL), 0, NULL);
733 T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(close(tunsock), NULL);
734 T_EXPECT_GE(tunsock = create_tunsock(true, true, true, NULL), 0, NULL);
735 T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(close(tunsock), NULL);
736 }
737
/* Runs the full enable permutation matrix against the ipsec control. */
T_DECL(ipsec_enables, "This test checks combinations of netif/channel/flowswitch on ipsec")
{
	setup_ipsec_test();
	permute_enables();
}
743
/* Runs the full enable permutation matrix against the utun control. */
T_DECL(utun_enables, "This test checks combinations of netif/channel/flowswitch on utun")
{
	setup_utun_test();
	permute_enables();
}
749
/* The tun control socket owned by setup_tunsock()/cleanup_tunsock(). */
static int g_tunsock = -1;

/*
 * T_ATEND handler: close the global tun socket and, for ipsec runs, the
 * pfkey socket.  The second close of each fd deliberately asserts EBADF
 * to catch the fd having been closed (or double-registered) elsewhere.
 */
static void
cleanup_tunsock(void)
{
	T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(close(g_tunsock), NULL);
	T_QUIET; T_EXPECT_POSIX_FAILURE(close(g_tunsock), EBADF, NULL);
	if (g_is_ipsec_test) {
		T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(close(g_pfkeyso), NULL);
		T_QUIET; T_EXPECT_POSIX_FAILURE(close(g_pfkeyso), EBADF, NULL);
	}
}
762
/*
 * Create the global tun socket (netif on, flowswitch off), register the
 * T_ATEND cleanup, assign a pid-derived 10.x.y.160/161 point-to-point
 * address pair to the new interface, and — for ipsec runs — install a
 * pair of SAs covering both directions.
 */
static void
setup_tunsock(int channel_count, uuid_t uuids[])
{
	T_ASSERT_GE(g_tunsock = create_tunsock(true, false, channel_count, uuids), 0, NULL);
	T_ATEND(cleanup_tunsock);

	char ifname[IFXNAMSIZ];
	tunsock_get_ifname(g_tunsock, ifname);
	T_LOG("Created interface %s", ifname);

	/* 10.<pid-hi>.<pid-lo>.160 — quasi-unique per test process. */
	uint32_t pid_bits = ((unsigned)getpid() & 0xffff) << 8;
	uint32_t ifaddr = (10 << 24) | pid_bits | 160;

	struct in_addr mask;
	g_addr1.s_addr = htonl(ifaddr);
	g_addr2.s_addr = htonl(ifaddr + 1);
	mask.s_addr = htonl(0xffffffff);

	ifnet_add_addr4(ifname, &g_addr1, &mask, &g_addr2);

	if (g_is_ipsec_test) {
		create_sa(ifname, SADB_ADD, 12345, &g_addr1, &g_addr2);
		create_sa(ifname, SADB_ADD, 12346, &g_addr2, &g_addr1);
	}
}
787
/* Brings up a single-channel ipsec interface with addresses and SAs. */
T_DECL(setup_ipsec, "This test sets up an ipsec interface")
{
	setup_ipsec_test();
	setup_tunsock(1, NULL);
}
793
/* Brings up a single-channel utun interface with addresses assigned. */
T_DECL(setup_utun, "This test sets up a utun interface")
{
	setup_utun_test();
	setup_tunsock(1, NULL);
}
799
/* All socket traffic classes exercised by the tests; the trailing comment
 * on each entry is the access category it maps to (BK/BE/VI/VO). */
static const int SOCKET_TRAFFIC_CLASSES[] = {
	SO_TC_BK_SYS, // BK
	SO_TC_BK, // BK
	SO_TC_BE, // BE
	SO_TC_RD, // BE
	SO_TC_OAM, // BE
	SO_TC_AV, // VI
	SO_TC_RV, // VI
	SO_TC_VI, // VI
	SO_TC_VO, // VO
	SO_TC_CTL, // VO
};

// this should match ipsec_find_tx_ring_by_svc in ipsec driver
/* Indexed in parallel with SOCKET_TRAFFIC_CLASSES: which TX ring each
 * traffic class is expected to land on. */
static const int SOCKET_TC_TO_RING[] = {
	3,
	3,
	2,
	2,
	2,
	1,
	1,
	1,
	0,
	0,
};

/* How many sockets map to this ring */
static const int RING_TO_TC_COUNT[] = {
	2, 3, 3, 2,
};
831
/*
 * Create the tun interface plus one channel per requested ring, resolve
 * the rx and/or tx ring for each channel (rxrings/txrings may be NULL if
 * the caller doesn't need that direction), and register each channel fd
 * with the caller's kqueue for EVFILT_READ, with the ring index in udata.
 *
 * Fixes over the original:
 *  - the txrings branch assigned rxrings[ri] from os_channel_rx_ring()
 *    while passing CHANNEL_FIRST_TX_RING — a copy/paste bug; it now
 *    fills txrings[ri] from os_channel_tx_ring().
 *  - when both rxrings and txrings are requested, the channel was
 *    created twice, leaking the first one; it is now created once.
 */
static void
setup_channels_and_rings(int kq, int channel_count, channel_t channels[], channel_ring_t rxrings[], channel_ring_t txrings[], uuid_t uuids[], int cfds[])
{
	setup_tunsock(channel_count, uuids);

#if 0
	// give time to enable a tcpdump if desired
	T_LOG("Sleeping 10");
	sleep(10);
	T_LOG("Done");
#endif

	for (int ri = 0; ri < channel_count; ri++) {
		if (rxrings || txrings) {
			T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(channels[ri] = os_channel_create(uuids[ri], 0), NULL);
		}
		if (rxrings) {
			T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(rxrings[ri] = os_channel_rx_ring(channels[ri],
			    os_channel_ring_id(channels[ri], CHANNEL_FIRST_RX_RING)), NULL);
		}
		if (txrings) {
			T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(txrings[ri] = os_channel_tx_ring(channels[ri],
			    os_channel_ring_id(channels[ri], CHANNEL_FIRST_TX_RING)), NULL);
		}

		/* Watch the channel fd so packet arrival wakes the kevent loop;
		 * udata carries the ring index for identification. */
		struct kevent kev;
		T_QUIET; T_EXPECT_POSIX_SUCCESS(cfds[ri] = os_channel_get_fd(channels[ri]), NULL);
		EV_SET(&kev, cfds[ri], EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, (void *)(uintptr_t)ri);
		T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(kevent(kq, &kev, 1, NULL, 0, NULL), NULL);
	}
}
862
863 static void
864 cleanup_channels_and_rings(int channel_count, channel_t channels[], channel_ring_t rxrings[], channel_ring_t txrings[], uuid_t uuids[])
865 {
866 for (int ri = 0; ri < channel_count; ri++) {
867 if (rxrings) {
868 rxrings[ri] = NULL;
869 }
870 if (txrings) {
871 rxrings[ri] = NULL;
872 }
873 os_channel_destroy(channels[ri]);
874 channels[ri] = NULL;
875 uuid_clear(uuids[ri]);
876 }
877 }
878
879 static void
880 setup_sockets(int sockets[SO_TC_MAX], int type)
881 {
882 for (int si = 0; si < SO_TC_MAX; si++) {
883 T_QUIET; T_EXPECT_POSIX_SUCCESS(sockets[si] = socket(PF_INET, type, 0), NULL);
884
885 T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(setsockopt(sockets[si], SOL_SOCKET,
886 SO_TRAFFIC_CLASS, &SOCKET_TRAFFIC_CLASSES[si], sizeof(SOCKET_TRAFFIC_CLASSES[si])), NULL);
887
888 // XXX setsockopt(IP_BOUND_IF) here?
889
890 struct sockaddr_in sin;
891 memset(&sin, 0, sizeof(sin));
892 sin.sin_len = sizeof(sin);
893 sin.sin_family = AF_INET;
894 sin.sin_addr = g_addr1;
895
896 T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(bind(sockets[si], (struct sockaddr *)&sin, sizeof(sin)), NULL);
897
898 char sbuf[INET6_ADDRSTRLEN];
899 inet_ntop(sin.sin_family, &sin.sin_addr.s_addr, sbuf, sizeof(sbuf));
900 #if 0
901 T_LOG("%s socket %d bound to %s port %d",
902 type == SOCK_DGRAM ? "udp" : type == SOCK_STREAM ? "tcp" : "???",
903 sockets[si], sbuf, ntohs(sin.sin_port));
904 #endif
905 setblocking(sockets[si], false);
906 }
907 }
908
909 static void
910 cleanup_sockets(int sockets[SO_TC_MAX])
911 {
912 for (int si = 0; si < SO_TC_MAX; si++) {
913 T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(close(sockets[si]), NULL);
914 sockets[si] = -1;
915 }
916 }
917
918 static void
919 drain_ring(channel_ring_t rxring)
920 {
921 uint32_t i, sc = os_channel_available_slot_count(rxring);
922 channel_slot_t rxprev = NULL;
923 for (i = 0; i < sc; i++) {
924 slot_prop_t rxprop;
925 channel_slot_t rxslot;
926
927 memset(&rxprop, 0, sizeof(rxprop));
928 T_QUIET; T_WITH_ERRNO; T_EXPECT_NOTNULL(rxslot = os_channel_get_next_slot(rxring, rxprev, &rxprop), NULL);
929 T_QUIET; T_ASSERT_NE_UINT(0, rxprop.sp_len, NULL);
930 T_QUIET; T_ASSERT_NOTNULL((void *)rxprop.sp_buf_ptr, NULL);
931
932 log_hexdump((void *)rxprop.sp_buf_ptr, rxprop.sp_len);
933
934 rxprev = rxslot;
935 }
936 if (sc) {
937 T_QUIET; T_EXPECT_POSIX_ZERO(os_channel_advance_slot(rxring, rxprev), NULL);
938 }
939 }
940
941 static void
942 send_one_packet(int s, int type)
943 {
944 struct sockaddr_in sin;
945 memset(&sin, 0, sizeof(sin));
946 sin.sin_len = sizeof(sin);
947 sin.sin_family = AF_INET;
948 sin.sin_addr = g_addr2;
949 sin.sin_port = ntohs(12345);
950
951 if (type == SOCK_STREAM) {
952 T_QUIET; T_EXPECT_POSIX_FAILURE(connect(s, (struct sockaddr *)&sin, sizeof(sin)), EINPROGRESS, NULL);
953 }
954 if (type == SOCK_DGRAM) {
955 T_QUIET; T_WITH_ERRNO; T_EXPECT_EQ_LONG((long)sizeof(s), sendto(s, &s, sizeof(s), 0,
956 (struct sockaddr *)&sin, sizeof(sin)), NULL);
957 }
958 }
959
960 static void
961 expect_empty_rings(int channel_count, channel_ring_t rings[])
962 {
963 /* Check all the rings and make sure there are no packets */
964 for (int ri = 0; ri < channel_count; ri++) {
965 T_QUIET; T_EXPECT_EQ_UINT(0U, os_channel_available_slot_count(rings[ri]), NULL);
966 }
967 }
968
/*
 * For each traffic class in turn: send exactly one packet, wait (via
 * kevent, 100ms timeout) for it to arrive, and verify it shows up as the
 * only packet on the single ring SOCKET_TC_TO_RING maps that class to
 * (ring 0 when there is only one channel).  The ring is drained between
 * iterations so each class is checked in isolation.
 */
static void
xfer_1_packet_singly(int channel_count, int type)
{
	uuid_t uuids[channel_count];
	channel_t channels[channel_count];
	int sockets[SO_TC_MAX];
	channel_ring_t rxrings[channel_count];
	int cfds[channel_count];
	int kq;

	T_QUIET; T_EXPECT_POSIX_SUCCESS(kq = kqueue(), NULL);

	setup_channels_and_rings(kq, channel_count, channels, rxrings, NULL, uuids, cfds);

	setup_sockets(sockets, type);

	for (int si = 0; si < SO_TC_MAX; si++) {
		/* Previous iteration drained everything, so all rings start empty. */
		expect_empty_rings(channel_count, rxrings);

		send_one_packet(sockets[si], type);

		int expected_ring = channel_count == 1 ? 0 : SOCKET_TC_TO_RING[si];

		/* Wait for the packet delivery and check that it's only one packet and on the correct ring */
		struct kevent kev[channel_count + 1];
		int nev;
		memset(kev, 0, sizeof(kev));
		struct timespec to = { 0, 100 * NSEC_PER_MSEC }; // 100 ms
		T_QUIET; T_EXPECT_POSIX_SUCCESS(nev = kevent(kq, NULL, 0, kev, channel_count + 1, &to), NULL);
		T_QUIET; T_EXPECT_EQ_INT(nev, 1, NULL);
		/* The single event must be for the expected ring's channel fd
		 * (udata was set to the ring index in setup_channels_and_rings). */
		T_QUIET; T_EXPECT_EQ_PTR((void *)kev[0].ident, (void *)(uintptr_t)cfds[expected_ring], NULL);
		T_QUIET; T_EXPECT_EQ_PTR(kev[0].udata, (void *)(uintptr_t)expected_ring, NULL);
		T_QUIET; T_EXPECT_EQ_SHORT(kev[0].filter, (short)EVFILT_READ, NULL);
		T_QUIET; T_EXPECT_FALSE(kev[0].flags & EV_ERROR, NULL);

		/* Make sure it comes out the expected interface */
		for (int ri = 0; ri < channel_count; ri++) {
			errno = 0;

			uint32_t sc = os_channel_available_slot_count(rxrings[ri]);

			/* Check that the packet appears only on the expected ring and
			 * is the only packet on the expected ring.
			 */
			/* (ri == expected_ring) promotes to 1/0, so this expects a
			 * slot count of exactly 1 on the target ring and 0 elsewhere. */
			T_QUIET; T_EXPECT_EQ_UINT(ri == expected_ring, sc, NULL);

			if ((ri == expected_ring) == sc) {
				T_PASS("tc index %d ring %d expected ring %d slot count %u", si, ri, expected_ring, sc);
			} else {
				T_FAIL("tc index %d ring %d expected ring %d slot count %u", si, ri, expected_ring, sc);
			}

			drain_ring(rxrings[ri]);
		}
	}

	cleanup_sockets(sockets);

	cleanup_channels_and_rings(channel_count, channels, rxrings, NULL, uuids);

	T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(close(kq), NULL);
}
1031
/* One-at-a-time delivery, UDP, single ring: every class lands on ring 0. */
T_DECL(ipsec35889979u1s, "transfers 1 packet at a time of each sevice class over udp to a single ring")
{
	setup_ipsec_test();
	xfer_1_packet_singly(1, SOCK_DGRAM);
}
1037
/* One-at-a-time delivery, UDP, 4 rings: verifies SOCKET_TC_TO_RING steering. */
T_DECL(ipsec35889979u4s, "transfers 1 packet at a time of each sevice class over udp to 4 rings")
{
	setup_ipsec_test();
	xfer_1_packet_singly(4, SOCK_DGRAM);
}
1043
/* One-at-a-time delivery, TCP (SYN packets), single ring. */
T_DECL(ipsec35889979t1s, "transfers 1 packet at a time of each sevice class over tcp to a single ring")
{
	setup_ipsec_test();
	xfer_1_packet_singly(1, SOCK_STREAM);
}
1049
1050
/* One-at-a-time delivery, TCP, 4 rings — disabled: see comment below. */
T_DECL(ipsec35889979t4s, "transfers 1 packet at a time of each sevice class over tcp to 4 rings",
    /* This test will fail because tcp syn packets get elevated
     * due to ack prioritization
     */
    T_META_ENABLED(false))
{
	setup_ipsec_test();
	xfer_1_packet_singly(4, SOCK_STREAM);
}
1060
/*
 * Send one packet of every traffic class back-to-back, then verify the
 * batch: every ring must raise exactly one EVFILT_READ event whose data
 * field matches the number of classes mapped to that ring
 * (RING_TO_TC_COUNT, or all SO_TC_MAX on a single ring), and the slot
 * counts on the rings must agree.
 *
 * Fixes over the original:
 *  - ri was obtained with a direct pointer-to-int cast of udata, which
 *    truncates on LP64 and draws -Wpointer-to-int-cast; it now round-trips
 *    through uintptr_t like the sibling casts in this file.
 *  - SO_TC_MAX (an int) was passed for a %PRIu32 conversion; it is now
 *    cast to uint32_t, matching the else branch's treatment of
 *    RING_TO_TC_COUNT.
 */
static void
xfer_1_packet_together(int channel_count, int type)
{
	uuid_t uuids[channel_count];
	channel_t channels[channel_count];
	int sockets[SO_TC_MAX];
	channel_ring_t rxrings[channel_count];
	int cfds[channel_count];
	int kq;

	T_QUIET; T_EXPECT_POSIX_SUCCESS(kq = kqueue(), NULL);

	setup_channels_and_rings(kq, channel_count, channels, rxrings, NULL, uuids, cfds);

	setup_sockets(sockets, type);

	for (int si = 0; si < SO_TC_MAX; si++) {
		/* NOTE(review): this checks emptiness between each send; it only
		 * holds while deliveries lag the send loop — confirm intent. */
		expect_empty_rings(channel_count, rxrings);

		send_one_packet(sockets[si], type);
	}

	/* Sleep to make sure all packets get delivered */
	struct timespec to = { 0, 100 * NSEC_PER_MSEC }; // 100 ms
	nanosleep(&to, NULL);

	/* Wait for the packet delivery and check that all rings event */
	struct kevent kev[channel_count + 1];
	int nev;
	memset(kev, 0, sizeof(kev));
	T_QUIET; T_EXPECT_POSIX_SUCCESS(nev = kevent(kq, NULL, 0, kev, channel_count + 1, &to), NULL);
	T_QUIET; T_EXPECT_EQ_INT(nev, channel_count, NULL);

	/* found[ri] accumulates the kevent data (packet counts) seen per ring. */
	uint32_t found[channel_count];
	memset(found, 0, sizeof(found));
	for (int e = 0; e < nev; e++) {
		T_LOG("kevent %lu filter 0x%4x flags 0x%04x fflags 0x%08x data %"PRIdPTR" udata %p",
		    kev[e].ident, kev[e].filter, kev[e].flags, kev[e].fflags, kev[e].data, kev[e].udata);

		/* udata was set to the ring index in setup_channels_and_rings. */
		T_QUIET; T_ASSERT_GE_PTR(kev[e].udata, (void *)0, NULL);
		T_QUIET; T_ASSERT_LT_PTR(kev[e].udata, (void *)(intptr_t)channel_count, NULL);
		int ri = (int)(uintptr_t)kev[e].udata;
		/* Each ring must event at most once. */
		T_QUIET; T_EXPECT_EQ_UINT(found[ri], 0U, NULL);

		T_QUIET; T_EXPECT_EQ_ULONG(kev[e].ident, (uintptr_t)cfds[ri], NULL);
		T_QUIET; T_EXPECT_EQ_SHORT(kev[e].filter, (short)EVFILT_READ, NULL);
		T_QUIET; T_EXPECT_FALSE(kev[e].flags & EV_ERROR, NULL);

		if (channel_count == 1) {
			T_QUIET; T_EXPECT_EQ_LONG(kev[e].data, (long)SO_TC_MAX, NULL);
		} else {
			T_QUIET; T_EXPECT_EQ_LONG(kev[e].data, (long)RING_TO_TC_COUNT[ri], NULL);
		}

		found[ri] += (uint32_t)kev[e].data;
	}
	/* Check that something came out of all rings */
	for (int ri = 0; ri < channel_count; ri++) {
		T_QUIET; T_EXPECT_NE_UINT(found[ri], 0U, NULL);
	}

	/* Make sure it comes out the expected interface */
	for (int ri = 0; ri < channel_count; ri++) {
		uint32_t sc = os_channel_available_slot_count(rxrings[ri]);
		if (channel_count == 1) {
			if (sc == SO_TC_MAX) {
				T_PASS("ring %d got %"PRIu32" slots expecting %"PRIu32"", ri, sc, (uint32_t)SO_TC_MAX);
			} else {
				T_FAIL("ring %d got %"PRIu32" slots expecting %"PRIu32"", ri, sc, (uint32_t)SO_TC_MAX);
			}
		} else {
			if (sc == (uint32_t)RING_TO_TC_COUNT[ri]) {
				T_PASS("ring %d got %"PRIu32" slots expecting %"PRIu32"", ri, sc, (uint32_t)RING_TO_TC_COUNT[ri]);
			} else {
				T_FAIL("ring %d got %"PRIu32" slots expecting %"PRIu32"", ri, sc, (uint32_t)RING_TO_TC_COUNT[ri]);
			}
		}

		drain_ring(rxrings[ri]);
	}

	cleanup_sockets(sockets);

	cleanup_channels_and_rings(channel_count, channels, rxrings, NULL, uuids);

	T_QUIET; T_WITH_ERRNO; T_EXPECT_POSIX_ZERO(close(kq), NULL);
}
1148
/* Batched delivery, UDP, single ring: all SO_TC_MAX packets on ring 0. */
T_DECL(ipsec35889979u1m, "transfers 1 packet together of each sevice class over udp to a single ring")
{
	setup_ipsec_test();
	xfer_1_packet_together(1, SOCK_DGRAM);
}
1154
/* Batched delivery, UDP, 4 rings: counts must match RING_TO_TC_COUNT. */
T_DECL(ipsec35889979u4m, "transfers 1 packet together of each sevice class over udp to 4 rings")
{
	setup_ipsec_test();
	xfer_1_packet_together(4, SOCK_DGRAM);
}
1160
/* Batched delivery, TCP (SYN packets), single ring. */
T_DECL(ipsec35889979t1m, "transfers 1 packet together of each sevice class over tcp to a single ring")
{
	setup_ipsec_test();
	xfer_1_packet_together(1, SOCK_STREAM);
}
1166
/* Batched delivery, TCP, 4 rings — disabled: see comment below. */
T_DECL(ipsec35889979t4m, "transfers 1 packet together of each sevice class over tcp to 4 rings",
    /* This test will fail because tcp syn packets get elevated
     * due to ack prioritization
     */
    T_META_ENABLED(false))
{
	setup_ipsec_test();
	xfer_1_packet_together(4, SOCK_STREAM);
}