bsd/kern/kpi_mbuf.c (apple/xnu, commit c4b070b42e7a535ebeedad5bcbf47709fb73579f)
1 /*
2 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22
23 #define __KPI__
24 //#include <sys/kpi_interface.h>
25
26 #include <sys/param.h>
27 #include <sys/mbuf.h>
28 #include <sys/socket.h>
29 #include <kern/debug.h>
30 #include <libkern/OSAtomic.h>
31 #include <kern/kalloc.h>
32 #include <string.h>
33
34 void mbuf_tag_id_first_last(u_long *first, u_long *last);
35 errno_t mbuf_tag_id_find_internal(const char *string, u_long *out_id, int create);
36
37 static const mbuf_flags_t mbuf_flags_mask = MBUF_EXT | MBUF_PKTHDR | MBUF_EOR |
38 MBUF_BCAST | MBUF_MCAST | MBUF_FRAG | MBUF_FIRSTFRAG |
39 MBUF_LASTFRAG | MBUF_PROMISC;
40
41 void* mbuf_data(mbuf_t mbuf)
42 {
43 return m_mtod(mbuf);
44 }
45
46 void* mbuf_datastart(mbuf_t mbuf)
47 {
48 if (mbuf->m_flags & M_EXT)
49 return mbuf->m_ext.ext_buf;
50 if (mbuf->m_flags & M_PKTHDR)
51 return mbuf->m_pktdat;
52 return mbuf->m_dat;
53 }
54
55 errno_t mbuf_setdata(mbuf_t mbuf, void* data, size_t len)
56 {
57 size_t start = (size_t)((char*)mbuf_datastart(mbuf));
58 size_t maxlen = mbuf_maxlen(mbuf);
59
60 if ((size_t)data < start || ((size_t)data) + len > start + maxlen)
61 return EINVAL;
62 mbuf->m_data = data;
63 mbuf->m_len = len;
64
65 return 0;
66 }
67
68 errno_t mbuf_align_32(mbuf_t mbuf, size_t len)
69 {
70 if ((mbuf->m_flags & M_EXT) != 0 && m_mclhasreference(mbuf))
71 return ENOTSUP;
72 mbuf->m_data = mbuf_datastart(mbuf);
73 mbuf->m_data += ((mbuf_trailingspace(mbuf) - len) &~ (sizeof(u_int32_t) - 1));
74
75 return 0;
76 }
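
/*
 * Illustrative sketch (not part of the original file): mbuf_align_32 is
 * typically used on a freshly allocated mbuf to leave room for `payload_len`
 * bytes at the end of the buffer, 32-bit aligned. `payload_len` is a
 * hypothetical name.
 *
 *	mbuf_t m = NULL;
 *	if (mbuf_gethdr(MBUF_WAITOK, MBUF_TYPE_DATA, &m) == 0) {
 *		mbuf_align_32(m, payload_len);	// position m_data on a 4-byte boundary
 *		mbuf_setlen(m, payload_len);
 *		mbuf_pkthdr_setlen(m, payload_len);
 *		// fill mbuf_data(m) with payload_len bytes ...
 *	}
 */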
77
78 addr64_t mbuf_data_to_physical(void* ptr)
79 {
80 return (addr64_t)mcl_to_paddr(ptr);
81 }
82
83 errno_t mbuf_get(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
84 {
85 /* Must set *mbuf to NULL in failure case */
86 *mbuf = m_get(how, type);
87
88 return (*mbuf == NULL) ? ENOMEM : 0;
89 }
90
91 errno_t mbuf_gethdr(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
92 {
93 /* Must set *mbuf to NULL in failure case */
94 *mbuf = m_gethdr(how, type);
95
96 return (*mbuf == NULL) ? ENOMEM : 0;
97 }
98
99 extern struct mbuf * m_mbigget(struct mbuf *m, int nowait);
100
101 errno_t mbuf_getcluster(mbuf_how_t how, mbuf_type_t type, size_t size, mbuf_t* mbuf)
102 {
103 /* Must set *mbuf to NULL in failure case */
104 errno_t error = 0;
105 int created = 0;
106
107 if (mbuf == NULL)
108 return EINVAL;
109 if (*mbuf == NULL) {
110 *mbuf = m_get(how, type);
111 if (*mbuf == NULL)
112 return ENOMEM;
113 created = 1;
114 }
115 /*
116 * At the time this code was written, m_mclget and m_mbigget would always
117 	 * return the same mbuf that was passed in to them.
118 */
119 if (size == MCLBYTES) {
120 *mbuf = m_mclget(*mbuf, how);
121 } else if (size == NBPG) {
122 *mbuf = m_mbigget(*mbuf, how);
123 } else {
124 error = EINVAL;
125 goto out;
126 }
127 if (*mbuf == NULL || ((*mbuf)->m_flags & M_EXT) == 0)
128 error = ENOMEM;
129 out:
130 if (created && error != 0) {
131 error = ENOMEM;
132 mbuf_free(*mbuf);
133 *mbuf = NULL;
134 }
135 return error;
136 }
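
/*
 * Illustrative usage sketch (not part of the original file): allocating an
 * mbuf with a 2 KB cluster attached. Only MCLBYTES and NBPG sized clusters
 * are supported; any other size returns EINVAL, as the code above shows.
 *
 *	mbuf_t m = NULL;
 *	errno_t err = mbuf_getcluster(MBUF_WAITOK, MBUF_TYPE_DATA, MCLBYTES, &m);
 *	if (err == 0) {
 *		// mbuf_data(m) now points into the attached cluster
 *		mbuf_freem(m);
 *	}
 */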
137
138 errno_t mbuf_mclget(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
139 {
140 /* Must set *mbuf to NULL in failure case */
141 errno_t error = 0;
142 int created = 0;
143 if (mbuf == NULL) return EINVAL;
144 if (*mbuf == NULL) {
145 error = mbuf_get(how, type, mbuf);
146 if (error)
147 return error;
148 created = 1;
149 }
150
151 /*
152 * At the time this code was written, m_mclget would always
153 * return the same value that was passed in to it.
154 */
155 *mbuf = m_mclget(*mbuf, how);
156
157 if (created && ((*mbuf)->m_flags & M_EXT) == 0) {
158 mbuf_free(*mbuf);
159 *mbuf = NULL;
160 }
161 if (*mbuf == NULL || ((*mbuf)->m_flags & M_EXT) == 0)
162 error = ENOMEM;
163 return error;
164 }
165
166
167 errno_t mbuf_getpacket(mbuf_how_t how, mbuf_t *mbuf)
168 {
169 /* Must set *mbuf to NULL in failure case */
170 errno_t error = 0;
171
172 *mbuf = m_getpacket_how(how);
173
174 if (*mbuf == NULL) {
175 if (how == MBUF_WAITOK)
176 error = ENOMEM;
177 else
178 error = EWOULDBLOCK;
179 }
180
181 return error;
182 }
183
184 mbuf_t mbuf_free(mbuf_t mbuf)
185 {
186 return m_free(mbuf);
187 }
188
189 void mbuf_freem(mbuf_t mbuf)
190 {
191 m_freem(mbuf);
192 }
193
194 int mbuf_freem_list(mbuf_t mbuf)
195 {
196 return m_freem_list(mbuf);
197 }
198
199 size_t mbuf_leadingspace(mbuf_t mbuf)
200 {
201 return m_leadingspace(mbuf);
202 }
203
204 size_t mbuf_trailingspace(mbuf_t mbuf)
205 {
206 return m_trailingspace(mbuf);
207 }
208
209 /* Manipulation */
210 errno_t mbuf_copym(mbuf_t src, size_t offset, size_t len,
211 mbuf_how_t how, mbuf_t *new_mbuf)
212 {
 213 	/* Must set *new_mbuf to NULL in failure case */
214 *new_mbuf = m_copym(src, offset, len, how);
215
216 return (*new_mbuf == NULL) ? ENOMEM : 0;
217 }
218
219 errno_t mbuf_dup(mbuf_t src, mbuf_how_t how, mbuf_t *new_mbuf)
220 {
221 /* Must set *new_mbuf to NULL in failure case */
222 *new_mbuf = m_dup(src, how);
223
224 return (*new_mbuf == NULL) ? ENOMEM : 0;
225 }
226
227 errno_t mbuf_prepend(mbuf_t *orig, size_t len, mbuf_how_t how)
228 {
229 /* Must set *orig to NULL in failure case */
230 *orig = m_prepend_2(*orig, len, how);
231
232 return (*orig == NULL) ? ENOMEM : 0;
233 }
234
235 errno_t mbuf_split(mbuf_t src, size_t offset,
236 mbuf_how_t how, mbuf_t *new_mbuf)
237 {
238 /* Must set *new_mbuf to NULL in failure case */
239 *new_mbuf = m_split(src, offset, how);
240
241 return (*new_mbuf == NULL) ? ENOMEM : 0;
242 }
243
244 errno_t mbuf_pullup(mbuf_t *mbuf, size_t len)
245 {
246 /* Must set *mbuf to NULL in failure case */
247 *mbuf = m_pullup(*mbuf, len);
248
249 return (*mbuf == NULL) ? ENOMEM : 0;
250 }
251
252 errno_t mbuf_pulldown(mbuf_t src, size_t *offset, size_t len, mbuf_t *location)
253 {
254 /* Must set *location to NULL in failure case */
255 int new_offset;
256 *location = m_pulldown(src, *offset, len, &new_offset);
257 *offset = new_offset;
258
259 return (*location == NULL) ? ENOMEM : 0;
260 }
261
262 void mbuf_adj(mbuf_t mbuf, int len)
263 {
264 m_adj(mbuf, len);
265 }
266
267 errno_t mbuf_copydata(mbuf_t m, size_t off, size_t len, void* out_data)
268 {
269 /* Copied m_copydata, added error handling (don't just panic) */
270 int count;
271
272 while (off > 0) {
273 if (m == 0)
274 return EINVAL;
275 if (off < (size_t)m->m_len)
276 break;
277 off -= m->m_len;
278 m = m->m_next;
279 }
280 while (len > 0) {
281 if (m == 0)
282 return EINVAL;
283 count = m->m_len - off > len ? len : m->m_len - off;
284 bcopy(mtod(m, caddr_t) + off, out_data, count);
285 len -= count;
286 out_data = ((char*)out_data) + count;
287 off = 0;
288 m = m->m_next;
289 }
290
291 return 0;
292 }
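
/*
 * Illustrative sketch (not part of the original file): copying the start of a
 * chain into a flat buffer, e.g. to look at a header that may span mbufs.
 * struct ip comes from <netinet/ip.h> and is used here only as an example.
 *
 *	struct ip iphdr;
 *	if (mbuf_copydata(m, 0, sizeof(iphdr), &iphdr) == 0) {
 *		// iphdr now holds a contiguous copy of the IP header
 *	}
 */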
293
294 int mbuf_mclref(mbuf_t mbuf)
295 {
296 return m_mclref(mbuf);
297 }
298
299 int mbuf_mclunref(mbuf_t mbuf)
300 {
301 return m_mclunref(mbuf);
302 }
303
304 int mbuf_mclhasreference(mbuf_t mbuf)
305 {
306 if ((mbuf->m_flags & M_EXT))
307 return m_mclhasreference(mbuf);
308 else
309 return 0;
310 }
311
312
313 /* mbuf header */
314 mbuf_t mbuf_next(mbuf_t mbuf)
315 {
316 return mbuf->m_next;
317 }
318
319 errno_t mbuf_setnext(mbuf_t mbuf, mbuf_t next)
320 {
321 if (next && ((next)->m_nextpkt != NULL ||
322 (next)->m_type == MT_FREE)) return EINVAL;
323 mbuf->m_next = next;
324
325 return 0;
326 }
327
328 mbuf_t mbuf_nextpkt(mbuf_t mbuf)
329 {
330 return mbuf->m_nextpkt;
331 }
332
333 void mbuf_setnextpkt(mbuf_t mbuf, mbuf_t nextpkt)
334 {
335 mbuf->m_nextpkt = nextpkt;
336 }
337
338 size_t mbuf_len(mbuf_t mbuf)
339 {
340 return mbuf->m_len;
341 }
342
343 void mbuf_setlen(mbuf_t mbuf, size_t len)
344 {
345 mbuf->m_len = len;
346 }
347
348 size_t mbuf_maxlen(mbuf_t mbuf)
349 {
350 if (mbuf->m_flags & M_EXT)
351 return mbuf->m_ext.ext_size;
352 return &mbuf->m_dat[MLEN] - ((char*)mbuf_datastart(mbuf));
353 }
354
355 mbuf_type_t mbuf_type(mbuf_t mbuf)
356 {
357 return mbuf->m_type;
358 }
359
360 errno_t mbuf_settype(mbuf_t mbuf, mbuf_type_t new_type)
361 {
362 if (new_type == MBUF_TYPE_FREE) return EINVAL;
363
364 m_mchtype(mbuf, new_type);
365
366 return 0;
367 }
368
369 mbuf_flags_t mbuf_flags(mbuf_t mbuf)
370 {
371 return mbuf->m_flags & mbuf_flags_mask;
372 }
373
374 errno_t mbuf_setflags(mbuf_t mbuf, mbuf_flags_t flags)
375 {
376 if ((flags & ~mbuf_flags_mask) != 0) return EINVAL;
377 mbuf->m_flags = flags |
378 (mbuf->m_flags & ~mbuf_flags_mask);
379
380 return 0;
381 }
382
383 errno_t mbuf_setflags_mask(mbuf_t mbuf, mbuf_flags_t flags, mbuf_flags_t mask)
384 {
385 if (((flags | mask) & ~mbuf_flags_mask) != 0) return EINVAL;
386
387 mbuf->m_flags = (flags & mask) | (mbuf->m_flags & ~mask);
388
389 return 0;
390 }
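
/*
 * Illustrative sketch (not part of the original file): mbuf_setflags_mask
 * only modifies the bits named in the mask, so a caller can mark a packet as
 * broadcast and clear the multicast bit without touching any other flags.
 *
 *	mbuf_setflags_mask(m, MBUF_BCAST, MBUF_BCAST | MBUF_MCAST);
 */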
391
392 errno_t mbuf_copy_pkthdr(mbuf_t dest, mbuf_t src)
393 {
394 if (((src)->m_flags & M_PKTHDR) == 0)
395 return EINVAL;
396
397 m_copy_pkthdr(dest, src);
398
399 return 0;
400 }
401
402 size_t mbuf_pkthdr_len(mbuf_t mbuf)
403 {
404 return mbuf->m_pkthdr.len;
405 }
406
407 void mbuf_pkthdr_setlen(mbuf_t mbuf, size_t len)
408 {
409 mbuf->m_pkthdr.len = len;
410 }
411
412 ifnet_t mbuf_pkthdr_rcvif(mbuf_t mbuf)
413 {
414 // If we reference count ifnets, we should take a reference here before returning
415 return mbuf->m_pkthdr.rcvif;
416 }
417
418 errno_t mbuf_pkthdr_setrcvif(mbuf_t mbuf, ifnet_t ifnet)
419 {
420 /* May want to walk ifnet list to determine if interface is valid */
421 mbuf->m_pkthdr.rcvif = (struct ifnet*)ifnet;
422 return 0;
423 }
424
425 void* mbuf_pkthdr_header(mbuf_t mbuf)
426 {
427 return mbuf->m_pkthdr.header;
428 }
429
430 void mbuf_pkthdr_setheader(mbuf_t mbuf, void *header)
431 {
432 mbuf->m_pkthdr.header = (void*)header;
433 }
434
435 /* mbuf aux data */
436 errno_t mbuf_aux_add(mbuf_t mbuf, int family, mbuf_type_t type, mbuf_t *aux_mbuf)
437 {
438 *aux_mbuf = m_aux_add(mbuf, family, type);
439 return (*aux_mbuf == NULL) ? ENOMEM : 0;
440 }
441
442 mbuf_t mbuf_aux_find(mbuf_t mbuf, int family, mbuf_type_t type)
443 {
444 return m_aux_find(mbuf, family, type);
445 }
446
447 void mbuf_aux_delete(mbuf_t mbuf, mbuf_t aux)
448 {
449 m_aux_delete(mbuf, aux);
450 }
451
452 void
453 mbuf_inbound_modified(mbuf_t mbuf)
454 {
455 /* Invalidate hardware generated checksum flags */
456 mbuf->m_pkthdr.csum_flags = 0;
457 }
458
459 extern void in_cksum_offset(struct mbuf* m, size_t ip_offset);
460 extern void in_delayed_cksum_offset(struct mbuf *m, int ip_offset);
461
462 void
463 mbuf_outbound_finalize(mbuf_t mbuf, u_long protocol_family, size_t protocol_offset)
464 {
465 if ((mbuf->m_pkthdr.csum_flags &
466 (CSUM_DELAY_DATA | CSUM_DELAY_IP | CSUM_TCP_SUM16)) == 0)
467 return;
468
 469 	/* Finish the delayed checksums in software; the client needs a completed packet */
470 switch (protocol_family) {
471 case PF_INET:
472 if (mbuf->m_pkthdr.csum_flags & CSUM_TCP_SUM16) {
473 /*
474 * If you're wondering where this lovely code comes
475 * from, we're trying to undo what happens in ip_output.
476 * Look for CSUM_TCP_SUM16 in ip_output.
477 */
478 u_int16_t first, second;
479 mbuf->m_pkthdr.csum_flags &= ~CSUM_TCP_SUM16;
480 mbuf->m_pkthdr.csum_flags |= CSUM_TCP;
481 first = mbuf->m_pkthdr.csum_data >> 16;
482 second = mbuf->m_pkthdr.csum_data & 0xffff;
483 mbuf->m_pkthdr.csum_data = first - second;
484 }
485 if (mbuf->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
486 in_delayed_cksum_offset(mbuf, protocol_offset);
487 }
488
489 if (mbuf->m_pkthdr.csum_flags & CSUM_DELAY_IP) {
490 in_cksum_offset(mbuf, protocol_offset);
491 }
492
493 mbuf->m_pkthdr.csum_flags &= ~(CSUM_DELAY_DATA | CSUM_DELAY_IP);
494 break;
495
496 default:
497 /*
 498 			 * The delayed-checksum code is IPv4 specific, so there is nothing
 499 			 * to finalize for other protocol families; delayed flags here are a bug.
500 */
501 if ((mbuf->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_IP)) != 0)
 502 				panic("mbuf_outbound_finalize - CSUM flags set for non-IPv4 packet (%lu)!\n", protocol_family);
503 }
504 }
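
/*
 * Illustrative sketch (not part of the original file, offset hypothetical):
 * a filter that has modified an outbound IPv4 packet and wants the delayed
 * checksums completed before passing the packet along can call this with the
 * offset of the IP header within the mbuf data.
 *
 *	mbuf_outbound_finalize(m, PF_INET, ip_hdr_offset);
 */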
505
506 errno_t
507 mbuf_set_vlan_tag(
508 mbuf_t mbuf,
509 u_int16_t vlan)
510 {
511 mbuf->m_pkthdr.csum_flags |= CSUM_VLAN_TAG_VALID;
512 mbuf->m_pkthdr.vlan_tag = vlan;
513
514 return 0;
515 }
516
517 errno_t
518 mbuf_get_vlan_tag(
519 mbuf_t mbuf,
520 u_int16_t *vlan)
521 {
522 if ((mbuf->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0)
523 return ENXIO; // No vlan tag set
524
525 *vlan = mbuf->m_pkthdr.vlan_tag;
526
527 return 0;
528 }
529
530 errno_t
531 mbuf_clear_vlan_tag(
532 mbuf_t mbuf)
533 {
534 mbuf->m_pkthdr.csum_flags &= ~CSUM_VLAN_TAG_VALID;
535 mbuf->m_pkthdr.vlan_tag = 0;
536
537 return 0;
538 }
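
/*
 * Illustrative sketch (not part of the original file): a driver that strips
 * an 802.1Q header on receive records the tag in the packet header; the
 * transmit side reads it back, and ENXIO means no tag was set.
 *
 *	u_int16_t tag;
 *	mbuf_set_vlan_tag(m, 0x0123);			// receive path
 *	if (mbuf_get_vlan_tag(m, &tag) == 0) {		// transmit path
 *		// re-insert an 802.1Q header carrying `tag`
 *	}
 */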
539
540 static const mbuf_csum_request_flags_t mbuf_valid_csum_request_flags =
541 MBUF_CSUM_REQ_IP | MBUF_CSUM_REQ_TCP | MBUF_CSUM_REQ_UDP | MBUF_CSUM_REQ_SUM16;
542
543 errno_t
544 mbuf_set_csum_requested(
545 mbuf_t mbuf,
546 mbuf_csum_request_flags_t request,
547 u_int32_t value)
548 {
549 request &= mbuf_valid_csum_request_flags;
550 mbuf->m_pkthdr.csum_flags = (mbuf->m_pkthdr.csum_flags & 0xffff0000) | request;
551 mbuf->m_pkthdr.csum_data = value;
552
553 return 0;
554 }
555
556 errno_t
557 mbuf_get_csum_requested(
558 mbuf_t mbuf,
559 mbuf_csum_request_flags_t *request,
560 u_int32_t *value)
561 {
562 *request = mbuf->m_pkthdr.csum_flags;
563 *request &= mbuf_valid_csum_request_flags;
564 if (value != NULL) {
565 *value = mbuf->m_pkthdr.csum_data;
566 }
567
568 return 0;
569 }
570
571 errno_t
572 mbuf_clear_csum_requested(
573 mbuf_t mbuf)
574 {
575 mbuf->m_pkthdr.csum_flags &= 0xffff0000;
576 mbuf->m_pkthdr.csum_data = 0;
577
578 return 0;
579 }
580
581 static const mbuf_csum_performed_flags_t mbuf_valid_csum_performed_flags =
582 MBUF_CSUM_DID_IP | MBUF_CSUM_IP_GOOD | MBUF_CSUM_DID_DATA |
583 MBUF_CSUM_PSEUDO_HDR | MBUF_CSUM_TCP_SUM16;
584
585 errno_t
586 mbuf_set_csum_performed(
587 mbuf_t mbuf,
588 mbuf_csum_performed_flags_t performed,
589 u_int32_t value)
590 {
591 performed &= mbuf_valid_csum_performed_flags;
592 mbuf->m_pkthdr.csum_flags = (mbuf->m_pkthdr.csum_flags & 0xffff0000) | performed;
593 mbuf->m_pkthdr.csum_data = value;
594
595 return 0;
596 }
597
598 errno_t
599 mbuf_get_csum_performed(
600 mbuf_t mbuf,
601 mbuf_csum_performed_flags_t *performed,
602 u_int32_t *value)
603 {
604 *performed = mbuf->m_pkthdr.csum_flags & mbuf_valid_csum_performed_flags;
605 *value = mbuf->m_pkthdr.csum_data;
606
607 return 0;
608 }
609
610 errno_t
611 mbuf_clear_csum_performed(
612 mbuf_t mbuf)
613 {
614 mbuf->m_pkthdr.csum_flags &= 0xffff0000;
615 mbuf->m_pkthdr.csum_data = 0;
616
617 return 0;
618 }
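
/*
 * Illustrative sketch (not part of the original file): a driver whose
 * hardware verified the IP header checksum and computed the one's complement
 * sum of the payload might report that to the stack as follows; `rx_csum`
 * stands in for the value read from the receive descriptor.
 *
 *	mbuf_set_csum_performed(m,
 *	    MBUF_CSUM_DID_IP | MBUF_CSUM_IP_GOOD | MBUF_CSUM_DID_DATA,
 *	    rx_csum);
 */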
619
620 /*
621 * Mbuf tag KPIs
622 */
623
624 struct mbuf_tag_id_entry {
625 SLIST_ENTRY(mbuf_tag_id_entry) next;
626 mbuf_tag_id_t id;
627 char string[];
628 };
629
630 #define MBUF_TAG_ID_ENTRY_SIZE(__str) \
631 ((size_t)&(((struct mbuf_tag_id_entry*)0)->string[0]) + \
632 strlen(__str) + 1)
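
/*
 * The macro above is the usual flexible-array-member sizing trick: it is
 * equivalent to offsetof(struct mbuf_tag_id_entry, string) plus
 * strlen(__str) + 1 for the copied string and its terminating NUL.
 */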
633
634 #define MTAG_FIRST_ID 1000
635 static u_long mtag_id_next = MTAG_FIRST_ID;
636 static SLIST_HEAD(,mbuf_tag_id_entry) mtag_id_list = {NULL};
637 static lck_mtx_t *mtag_id_lock = NULL;
638
639 __private_extern__ void
640 mbuf_tag_id_first_last(
641 u_long *first,
642 u_long *last)
643 {
644 *first = MTAG_FIRST_ID;
645 *last = mtag_id_next - 1;
646 }
647
648 __private_extern__ errno_t
649 mbuf_tag_id_find_internal(
650 const char *string,
651 u_long *out_id,
652 int create)
653 {
654 struct mbuf_tag_id_entry *entry = NULL;
655
656
 657 	if (string == NULL || out_id == NULL) {
 658 		return EINVAL;
 659 	}
 660 
 661 	*out_id = 0;
662
663 /* Don't bother allocating the lock if we're only doing a lookup */
664 if (create == 0 && mtag_id_lock == NULL)
665 return ENOENT;
666
667 /* Allocate lock if necessary */
668 if (mtag_id_lock == NULL) {
669 lck_grp_attr_t *grp_attrib = NULL;
670 lck_attr_t *lck_attrb = NULL;
671 lck_grp_t *lck_group = NULL;
672 lck_mtx_t *new_lock = NULL;
673
674 grp_attrib = lck_grp_attr_alloc_init();
675 lck_grp_attr_setdefault(grp_attrib);
676 lck_group = lck_grp_alloc_init("mbuf_tag_allocate_id", grp_attrib);
677 lck_grp_attr_free(grp_attrib);
678 lck_attrb = lck_attr_alloc_init();
679 lck_attr_setdefault(lck_attrb);
680 lck_attr_setdebug(lck_attrb);
681 new_lock = lck_mtx_alloc_init(lck_group, lck_attrb);
682 if (!OSCompareAndSwap((UInt32)0, (UInt32)new_lock, (UInt32*)&mtag_id_lock)) {
683 /*
684 * If the atomic swap fails, someone else has already
685 * done this work. We can free the stuff we allocated.
686 */
687 lck_mtx_free(new_lock, lck_group);
688 lck_grp_free(lck_group);
689 }
690 lck_attr_free(lck_attrb);
691 }
692
693 /* Look for an existing entry */
694 lck_mtx_lock(mtag_id_lock);
695 SLIST_FOREACH(entry, &mtag_id_list, next) {
696 if (strcmp(string, entry->string) == 0) {
697 break;
698 }
699 }
700
701 if (entry == NULL) {
702 if (create == 0) {
703 lck_mtx_unlock(mtag_id_lock);
704 return ENOENT;
705 }
706
707 entry = kalloc(MBUF_TAG_ID_ENTRY_SIZE(string));
708 if (entry == NULL) {
709 lck_mtx_unlock(mtag_id_lock);
710 return ENOMEM;
711 }
712
713 strcpy(entry->string, string);
714 entry->id = mtag_id_next;
715 mtag_id_next++;
716 SLIST_INSERT_HEAD(&mtag_id_list, entry, next);
717 }
718 lck_mtx_unlock(mtag_id_lock);
719
720 *out_id = entry->id;
721
722 return 0;
723 }
724
725 errno_t
726 mbuf_tag_id_find(
727 const char *string,
728 mbuf_tag_id_t *out_id)
729 {
730 return mbuf_tag_id_find_internal(string, (u_long*)out_id, 1);
731 }
732
733 errno_t
734 mbuf_tag_allocate(
735 mbuf_t mbuf,
736 mbuf_tag_id_t id,
737 mbuf_tag_type_t type,
738 size_t length,
739 mbuf_how_t how,
740 void** data_p)
741 {
742 struct m_tag *tag;
743
744 if (data_p != NULL)
745 *data_p = NULL;
746
747 /* Sanity check parameters */
748 if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 || id < MTAG_FIRST_ID ||
749 id >= mtag_id_next || length < 1 || (length & 0xffff0000) != 0 ||
750 data_p == NULL) {
751 return EINVAL;
752 }
753
754 /* Make sure this mtag hasn't already been allocated */
755 tag = m_tag_locate(mbuf, id, type, NULL);
756 if (tag != NULL) {
757 return EEXIST;
758 }
759
760 /* Allocate an mtag */
761 tag = m_tag_alloc(id, type, length, how);
762 if (tag == NULL) {
763 return how == M_WAITOK ? ENOMEM : EWOULDBLOCK;
764 }
765
766 /* Attach the mtag and set *data_p */
767 m_tag_prepend(mbuf, tag);
768 *data_p = tag + 1;
769
770 return 0;
771 }
772
773 errno_t
774 mbuf_tag_find(
775 mbuf_t mbuf,
776 mbuf_tag_id_t id,
777 mbuf_tag_type_t type,
778 size_t* length,
779 void** data_p)
780 {
781 struct m_tag *tag;
782
783 if (length != NULL)
784 *length = 0;
785 if (data_p != NULL)
786 *data_p = NULL;
787
788 /* Sanity check parameters */
789 if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 || id < MTAG_FIRST_ID ||
790 id >= mtag_id_next || length == NULL || data_p == NULL) {
791 return EINVAL;
792 }
793
794 /* Locate an mtag */
795 tag = m_tag_locate(mbuf, id, type, NULL);
796 if (tag == NULL) {
797 return ENOENT;
798 }
799
 800 	/* Copy out the pointer to the data and the length value */
801 *length = tag->m_tag_len;
802 *data_p = tag + 1;
803
804 return 0;
805 }
806
807 void
808 mbuf_tag_free(
809 mbuf_t mbuf,
810 mbuf_tag_id_t id,
811 mbuf_tag_type_t type)
812 {
813 struct m_tag *tag;
814
815 if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 || id < MTAG_FIRST_ID ||
816 id >= mtag_id_next)
817 return;
818
819 tag = m_tag_locate(mbuf, id, type, NULL);
820 if (tag == NULL) {
821 return;
822 }
823
824 m_tag_delete(mbuf, tag);
825 return;
826 }
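
/*
 * Illustrative sketch (not part of the original file): a typical tag round
 * trip for a kext. The tag name, tag type constant, and payload struct are
 * hypothetical.
 *
 *	struct my_tag { u_int32_t cookie; };
 *	mbuf_tag_id_t my_id;
 *	void *data;
 *	size_t len;
 *
 *	if (mbuf_tag_id_find("com.example.myfilter", &my_id) == 0 &&
 *	    mbuf_tag_allocate(m, my_id, 1, sizeof(struct my_tag),
 *	                      MBUF_WAITOK, &data) == 0) {
 *		((struct my_tag*)data)->cookie = 0xfeedface;
 *	}
 *
 *	// later, possibly in a different filter callback:
 *	if (mbuf_tag_find(m, my_id, 1, &len, &data) == 0) {
 *		// use the tag payload, then discard it
 *		mbuf_tag_free(m, my_id, 1);
 *	}
 */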
827
828 /* mbuf stats */
829 void mbuf_stats(struct mbuf_stat *stats)
830 {
831 stats->mbufs = mbstat.m_mbufs;
832 stats->clusters = mbstat.m_clusters;
833 stats->clfree = mbstat.m_clfree;
834 stats->drops = mbstat.m_drops;
835 stats->wait = mbstat.m_wait;
836 stats->drain = mbstat.m_drain;
837 __builtin_memcpy(stats->mtypes, mbstat.m_mtypes, sizeof(stats->mtypes));
838 stats->mcfail = mbstat.m_mcfail;
839 stats->mpfail = mbstat.m_mpfail;
840 stats->msize = mbstat.m_msize;
841 stats->mclbytes = mbstat.m_mclbytes;
842 stats->minclsize = mbstat.m_minclsize;
843 stats->mlen = mbstat.m_mlen;
844 stats->mhlen = mbstat.m_mhlen;
845 stats->bigclusters = mbstat.m_bigclusters;
846 stats->bigclfree = mbstat.m_bigclfree;
847 stats->bigmclbytes = mbstat.m_bigmclbytes;
848 }
849
850 errno_t
851 mbuf_allocpacket(mbuf_how_t how, size_t packetlen, unsigned int *maxchunks, mbuf_t *mbuf)
852 {
853 errno_t error;
854 struct mbuf *m;
855 unsigned int numpkts = 1;
856 unsigned int numchunks = maxchunks ? *maxchunks : 0;
857
858 if (packetlen == 0) {
859 error = EINVAL;
860 goto out;
861 }
862 m = m_allocpacket_internal(&numpkts, packetlen, maxchunks ? &numchunks : NULL, how, 1, 0);
863 if (m == 0) {
864 if (maxchunks && *maxchunks && numchunks > *maxchunks)
865 error = ENOBUFS;
866 else
867 error = ENOMEM;
868 } else {
869 error = 0;
870 *mbuf = m;
871 }
872 out:
873 return error;
874 }
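
/*
 * Illustrative sketch (not part of the original file): requesting a packet
 * big enough for `pkt_len` bytes while capping the chain at four mbufs;
 * ENOBUFS means the length could not be satisfied within that limit.
 *
 *	mbuf_t m = NULL;
 *	unsigned int maxchunks = 4;
 *	errno_t err = mbuf_allocpacket(MBUF_WAITOK, pkt_len, &maxchunks, &m);
 *	if (err == 0) {
 *		// fill the chain (e.g. with mbuf_copyback), then send or free it
 *	}
 */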
875
876
877 /*
878 * mbuf_copyback differs from m_copyback in a few ways:
879 * 1) mbuf_copyback will allocate clusters for new mbufs we append
880 * 2) mbuf_copyback will grow the last mbuf in the chain if possible
881 * 3) mbuf_copyback reports whether or not the operation succeeded
882 * 4) mbuf_copyback allows the caller to specify M_WAITOK or M_NOWAIT
883 */
884 errno_t
885 mbuf_copyback(
886 mbuf_t m,
887 size_t off,
888 size_t len,
889 const void *data,
890 mbuf_how_t how)
891 {
892 size_t mlen;
893 mbuf_t m_start = m;
894 mbuf_t n;
895 int totlen = 0;
896 errno_t result = 0;
897 const char *cp = data;
898
899 if (m == NULL || len == 0 || data == NULL)
900 return EINVAL;
901
902 while (off > (mlen = m->m_len)) {
903 off -= mlen;
904 totlen += mlen;
905 if (m->m_next == 0) {
906 n = m_getclr(how, m->m_type);
907 if (n == 0) {
908 result = ENOBUFS;
909 goto out;
910 }
911 n->m_len = MIN(MLEN, len + off);
912 m->m_next = n;
913 }
914 m = m->m_next;
915 }
916
917 while (len > 0) {
918 mlen = MIN(m->m_len - off, len);
919 if (mlen < len && m->m_next == NULL && mbuf_trailingspace(m) > 0) {
920 size_t grow = MIN(mbuf_trailingspace(m), len - mlen);
921 mlen += grow;
922 m->m_len += grow;
923 }
924 bcopy(cp, off + (char*)mbuf_data(m), (unsigned)mlen);
925 cp += mlen;
926 len -= mlen;
927 mlen += off;
928 off = 0;
929 totlen += mlen;
930 if (len == 0)
931 break;
932 if (m->m_next == 0) {
933 n = m_get(how, m->m_type);
934 if (n == NULL) {
935 result = ENOBUFS;
936 goto out;
937 }
938 if (len > MINCLSIZE) {
 939 				/* cluster allocation failure is okay, we can grow the chain */
940 mbuf_mclget(how, m->m_type, &n);
941 }
942 n->m_len = MIN(mbuf_maxlen(n), len);
943 m->m_next = n;
944 }
945 m = m->m_next;
946 }
947
948 out:
949 if ((m_start->m_flags & M_PKTHDR) && (m_start->m_pkthdr.len < totlen))
950 m_start->m_pkthdr.len = totlen;
951
952 return result;
953 }
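
/*
 * Illustrative sketch (not part of the original file): writing bytes into the
 * middle of a packet, letting mbuf_copyback grow the chain if the packet is
 * currently shorter than off + len. `off` is hypothetical.
 *
 *	static const char payload[] = "hello";
 *	errno_t err = mbuf_copyback(m, off, sizeof(payload) - 1, payload,
 *	                            MBUF_WAITOK);
 *	if (err == ENOBUFS) {
 *		// allocation failed while extending the chain
 *	}
 */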