/*
 * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#define __KPI__
//#include <sys/kpi_interface.h>

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <kern/debug.h>
#include <libkern/OSAtomic.h>
#include <kern/kalloc.h>
#include <string.h>

void mbuf_tag_id_first_last(u_long *first, u_long *last);
errno_t mbuf_tag_id_find_internal(const char *string, u_long *out_id, int create);

static const mbuf_flags_t mbuf_flags_mask = MBUF_EXT | MBUF_PKTHDR | MBUF_EOR |
		MBUF_BCAST | MBUF_MCAST | MBUF_FRAG | MBUF_FIRSTFRAG |
		MBUF_LASTFRAG | MBUF_PROMISC;

void* mbuf_data(mbuf_t mbuf)
{
	return m_mtod(mbuf);
}

void* mbuf_datastart(mbuf_t mbuf)
{
	if (mbuf->m_flags & M_EXT)
		return mbuf->m_ext.ext_buf;
	if (mbuf->m_flags & M_PKTHDR)
		return mbuf->m_pktdat;
	return mbuf->m_dat;
}

errno_t mbuf_setdata(mbuf_t mbuf, void* data, size_t len)
{
	size_t start = (size_t)((char*)mbuf_datastart(mbuf));
	size_t maxlen = mbuf_maxlen(mbuf);

	if ((size_t)data < start || ((size_t)data) + len > start + maxlen)
		return EINVAL;
	mbuf->m_data = data;
	mbuf->m_len = len;

	return 0;
}

errno_t mbuf_align_32(mbuf_t mbuf, size_t len)
{
	if ((mbuf->m_flags & M_EXT) != 0 && m_mclhasreference(mbuf))
		return ENOTSUP;
	mbuf->m_data = mbuf_datastart(mbuf);
	mbuf->m_data += ((mbuf_trailingspace(mbuf) - len) &~ (sizeof(u_int32_t) - 1));

	return 0;
}

addr64_t mbuf_data_to_physical(void* ptr)
{
	return (addr64_t)mcl_to_paddr(ptr);
}

errno_t mbuf_get(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	*mbuf = m_get(how, type);

	return (*mbuf == NULL) ? ENOMEM : 0;
}

errno_t mbuf_gethdr(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	*mbuf = m_gethdr(how, type);

	return (*mbuf == NULL) ? ENOMEM : 0;
}

extern struct mbuf * m_mbigget(struct mbuf *m, int nowait);

errno_t mbuf_getcluster(mbuf_how_t how, mbuf_type_t type, size_t size, mbuf_t* mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	errno_t error = 0;
	int created = 0;

	if (mbuf == NULL)
		return EINVAL;
	if (*mbuf == NULL) {
		*mbuf = m_get(how, type);
		if (*mbuf == NULL)
			return ENOMEM;
		created = 1;
	}
	/*
	 * At the time this code was written, m_mclget and m_mbigget would always
	 * return the same value that was passed in to them.
	 */
	if (size == MCLBYTES) {
		*mbuf = m_mclget(*mbuf, how);
	} else if (size == NBPG) {
		*mbuf = m_mbigget(*mbuf, how);
	} else {
		error = EINVAL;
		goto out;
	}
	if (*mbuf == NULL || ((*mbuf)->m_flags & M_EXT) == 0)
		error = ENOMEM;
out:
	if (created && error != 0) {
		error = ENOMEM;
		mbuf_free(*mbuf);
		*mbuf = NULL;
	}
	return error;
}

errno_t mbuf_mclget(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	errno_t error = 0;
	int created = 0;
	if (mbuf == NULL) return EINVAL;
	if (*mbuf == NULL) {
		error = mbuf_get(how, type, mbuf);
		if (error)
			return error;
		created = 1;
	}

	/*
	 * At the time this code was written, m_mclget would always
	 * return the same value that was passed in to it.
	 */
	*mbuf = m_mclget(*mbuf, how);

	if (created && ((*mbuf)->m_flags & M_EXT) == 0) {
		mbuf_free(*mbuf);
		*mbuf = NULL;
	}
	if (*mbuf == NULL || ((*mbuf)->m_flags & M_EXT) == 0)
		error = ENOMEM;
	return error;
}


errno_t mbuf_getpacket(mbuf_how_t how, mbuf_t *mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	errno_t error = 0;

	*mbuf = m_getpacket_how(how);

	if (*mbuf == NULL) {
		if (how == MBUF_WAITOK)
			error = ENOMEM;
		else
			error = EWOULDBLOCK;
	}

	return error;
}
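
/*
 * Usage sketch (hypothetical, not part of this KPI): a kext-side helper that
 * allocates a packet header mbuf with a cluster via mbuf_getpacket() and
 * releases it if the requested length does not fit. The example_* name is
 * invented for illustration; a real caller would include <sys/kpi_mbuf.h>.
 *
 *	static errno_t
 *	example_alloc_packet(size_t len, mbuf_t *out)
 *	{
 *		mbuf_t m = NULL;
 *		errno_t err = mbuf_getpacket(MBUF_WAITOK, &m);
 *
 *		if (err != 0)
 *			return err;			// ENOMEM or EWOULDBLOCK
 *		if (len > mbuf_maxlen(m)) {		// data must fit in the cluster
 *			mbuf_freem(m);
 *			return EINVAL;
 *		}
 *		mbuf_setlen(m, len);			// caller fills the data area next
 *		mbuf_pkthdr_setlen(m, len);
 *		*out = m;
 *		return 0;
 *	}
 */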

mbuf_t mbuf_free(mbuf_t mbuf)
{
	return m_free(mbuf);
}

void mbuf_freem(mbuf_t mbuf)
{
	m_freem(mbuf);
}

int mbuf_freem_list(mbuf_t mbuf)
{
	return m_freem_list(mbuf);
}

size_t mbuf_leadingspace(mbuf_t mbuf)
{
	return m_leadingspace(mbuf);
}

size_t mbuf_trailingspace(mbuf_t mbuf)
{
	return m_trailingspace(mbuf);
}

/* Manipulation */
errno_t mbuf_copym(mbuf_t src, size_t offset, size_t len,
		mbuf_how_t how, mbuf_t *new_mbuf)
{
	/* Must set *mbuf to NULL in failure case */
	*new_mbuf = m_copym(src, offset, len, how);

	return (*new_mbuf == NULL) ? ENOMEM : 0;
}

errno_t mbuf_dup(mbuf_t src, mbuf_how_t how, mbuf_t *new_mbuf)
{
	/* Must set *new_mbuf to NULL in failure case */
	*new_mbuf = m_dup(src, how);

	return (*new_mbuf == NULL) ? ENOMEM : 0;
}

errno_t mbuf_prepend(mbuf_t *orig, size_t len, mbuf_how_t how)
{
	/* Must set *orig to NULL in failure case */
	*orig = m_prepend_2(*orig, len, how);

	return (*orig == NULL) ? ENOMEM : 0;
}

errno_t mbuf_split(mbuf_t src, size_t offset,
		mbuf_how_t how, mbuf_t *new_mbuf)
{
	/* Must set *new_mbuf to NULL in failure case */
	*new_mbuf = m_split(src, offset, how);

	return (*new_mbuf == NULL) ? ENOMEM : 0;
}

errno_t mbuf_pullup(mbuf_t *mbuf, size_t len)
{
	/* Must set *mbuf to NULL in failure case */
	*mbuf = m_pullup(*mbuf, len);

	return (*mbuf == NULL) ? ENOMEM : 0;
}

errno_t mbuf_pulldown(mbuf_t src, size_t *offset, size_t len, mbuf_t *location)
{
	/* Must set *location to NULL in failure case */
	int new_offset;
	*location = m_pulldown(src, *offset, len, &new_offset);
	*offset = new_offset;

	return (*location == NULL) ? ENOMEM : 0;
}

void mbuf_adj(mbuf_t mbuf, int len)
{
	m_adj(mbuf, len);
}

errno_t mbuf_copydata(mbuf_t m, size_t off, size_t len, void* out_data)
{
	/* Copied m_copydata, added error handling (don't just panic) */
	int count;

	while (off > 0) {
		if (m == 0)
			return EINVAL;
		if (off < (size_t)m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == 0)
			return EINVAL;
		count = m->m_len - off > len ? len : m->m_len - off;
		bcopy(mtod(m, caddr_t) + off, out_data, count);
		len -= count;
		out_data = ((char*)out_data) + count;
		off = 0;
		m = m->m_next;
	}

	return 0;
}
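
/*
 * Usage sketch (hypothetical): copying a fixed-size header out of a chain
 * with mbuf_copydata(), which walks m_next links for the caller and returns
 * EINVAL instead of panicking when the chain is shorter than off + len.
 * The example_hdr layout and function name are invented for illustration.
 *
 *	struct example_hdr { u_int8_t ver; u_int8_t flags; u_int16_t len; };
 *
 *	static errno_t
 *	example_read_hdr(mbuf_t m, size_t offset, struct example_hdr *hdr)
 *	{
 *		return mbuf_copydata(m, offset, sizeof(*hdr), hdr);
 *	}
 */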

int mbuf_mclref(mbuf_t mbuf)
{
	return m_mclref(mbuf);
}

int mbuf_mclunref(mbuf_t mbuf)
{
	return m_mclunref(mbuf);
}

int mbuf_mclhasreference(mbuf_t mbuf)
{
	if ((mbuf->m_flags & M_EXT))
		return m_mclhasreference(mbuf);
	else
		return 0;
}


/* mbuf header */
mbuf_t mbuf_next(mbuf_t mbuf)
{
	return mbuf->m_next;
}

errno_t mbuf_setnext(mbuf_t mbuf, mbuf_t next)
{
	if (next && ((next)->m_nextpkt != NULL ||
		(next)->m_type == MT_FREE)) return EINVAL;
	mbuf->m_next = next;

	return 0;
}

mbuf_t mbuf_nextpkt(mbuf_t mbuf)
{
	return mbuf->m_nextpkt;
}

void mbuf_setnextpkt(mbuf_t mbuf, mbuf_t nextpkt)
{
	mbuf->m_nextpkt = nextpkt;
}

size_t mbuf_len(mbuf_t mbuf)
{
	return mbuf->m_len;
}

void mbuf_setlen(mbuf_t mbuf, size_t len)
{
	mbuf->m_len = len;
}

size_t mbuf_maxlen(mbuf_t mbuf)
{
	if (mbuf->m_flags & M_EXT)
		return mbuf->m_ext.ext_size;
	return &mbuf->m_dat[MLEN] - ((char*)mbuf_datastart(mbuf));
}

mbuf_type_t mbuf_type(mbuf_t mbuf)
{
	return mbuf->m_type;
}

errno_t mbuf_settype(mbuf_t mbuf, mbuf_type_t new_type)
{
	if (new_type == MBUF_TYPE_FREE) return EINVAL;

	m_mchtype(mbuf, new_type);

	return 0;
}

mbuf_flags_t mbuf_flags(mbuf_t mbuf)
{
	return mbuf->m_flags & mbuf_flags_mask;
}

errno_t mbuf_setflags(mbuf_t mbuf, mbuf_flags_t flags)
{
	if ((flags & ~mbuf_flags_mask) != 0) return EINVAL;
	mbuf->m_flags = flags |
		(mbuf->m_flags & ~mbuf_flags_mask);

	return 0;
}

errno_t mbuf_setflags_mask(mbuf_t mbuf, mbuf_flags_t flags, mbuf_flags_t mask)
{
	if (((flags | mask) & ~mbuf_flags_mask) != 0) return EINVAL;

	mbuf->m_flags = (flags & mask) | (mbuf->m_flags & ~mask);

	return 0;
}
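
/*
 * Usage sketch (hypothetical): mbuf_setflags_mask() only touches the bits
 * named in the mask, so a caller can mark a packet as broadcast without
 * disturbing MBUF_PKTHDR, MBUF_EXT, or the other flag bits. The function
 * name below is invented for illustration.
 *
 *	static errno_t
 *	example_mark_broadcast(mbuf_t m)
 *	{
 *		return mbuf_setflags_mask(m, MBUF_BCAST, MBUF_BCAST);
 *	}
 */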

errno_t mbuf_copy_pkthdr(mbuf_t dest, mbuf_t src)
{
	if (((src)->m_flags & M_PKTHDR) == 0)
		return EINVAL;

	m_copy_pkthdr(dest, src);

	return 0;
}

size_t mbuf_pkthdr_len(mbuf_t mbuf)
{
	return mbuf->m_pkthdr.len;
}

void mbuf_pkthdr_setlen(mbuf_t mbuf, size_t len)
{
	mbuf->m_pkthdr.len = len;
}

ifnet_t mbuf_pkthdr_rcvif(mbuf_t mbuf)
{
	// If we reference count ifnets, we should take a reference here before returning
	return mbuf->m_pkthdr.rcvif;
}

errno_t mbuf_pkthdr_setrcvif(mbuf_t mbuf, ifnet_t ifnet)
{
	/* May want to walk ifnet list to determine if interface is valid */
	mbuf->m_pkthdr.rcvif = (struct ifnet*)ifnet;
	return 0;
}

void* mbuf_pkthdr_header(mbuf_t mbuf)
{
	return mbuf->m_pkthdr.header;
}

void mbuf_pkthdr_setheader(mbuf_t mbuf, void *header)
{
	mbuf->m_pkthdr.header = (void*)header;
}

/* mbuf aux data */
errno_t mbuf_aux_add(mbuf_t mbuf, int family, mbuf_type_t type, mbuf_t *aux_mbuf)
{
	*aux_mbuf = m_aux_add(mbuf, family, type);
	return (*aux_mbuf == NULL) ? ENOMEM : 0;
}

mbuf_t mbuf_aux_find(mbuf_t mbuf, int family, mbuf_type_t type)
{
	return m_aux_find(mbuf, family, type);
}

void mbuf_aux_delete(mbuf_t mbuf, mbuf_t aux)
{
	m_aux_delete(mbuf, aux);
}

void
mbuf_inbound_modified(mbuf_t mbuf)
{
	/* Invalidate hardware generated checksum flags */
	mbuf->m_pkthdr.csum_flags = 0;
}

extern void in_cksum_offset(struct mbuf* m, size_t ip_offset);
extern void in_delayed_cksum_offset(struct mbuf *m, int ip_offset);

void
mbuf_outbound_finalize(mbuf_t mbuf, u_long protocol_family, size_t protocol_offset)
{
	if ((mbuf->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_IP)) == 0)
		return;

	/* Generate the packet in software, client needs it */
	switch (protocol_family) {
		case PF_INET:
			if (mbuf->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
				in_delayed_cksum_offset(mbuf, protocol_offset);
			}

			if (mbuf->m_pkthdr.csum_flags & CSUM_DELAY_IP) {
				in_cksum_offset(mbuf, protocol_offset);
			}

			mbuf->m_pkthdr.csum_flags &= ~(CSUM_DELAY_DATA | CSUM_DELAY_IP);
			break;

		default:
			/*
			 * Not sure what to do here if anything.
			 * Hardware checksum code looked pretty IPv4 specific.
			 */
			if ((mbuf->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_IP)) != 0)
				panic("mbuf_outbound_finalize - CSUM flags set for non-IPv4 packet (%d)!\n", protocol_family);
	}
}
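
/*
 * Usage sketch (hypothetical interface filter): a filter that wants to see
 * complete checksums must call mbuf_outbound_finalize() before inspecting an
 * outbound IPv4 packet, otherwise the IP/TCP/UDP sums may still be deferred
 * to hardware. The function name is invented; ip_hdr_offset is whatever
 * offset the caller knows the IP header to be at (14 for plain Ethernet).
 *
 *	static void
 *	example_finalize_ipv4(mbuf_t m, size_t ip_hdr_offset)
 *	{
 *		mbuf_outbound_finalize(m, PF_INET, ip_hdr_offset);
 *		// checksum fields in the packet data are now valid
 *	}
 */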

errno_t
mbuf_set_vlan_tag(
	mbuf_t mbuf,
	u_int16_t vlan)
{
	mbuf->m_pkthdr.csum_flags |= CSUM_VLAN_TAG_VALID;
	mbuf->m_pkthdr.vlan_tag = vlan;

	return 0;
}

errno_t
mbuf_get_vlan_tag(
	mbuf_t mbuf,
	u_int16_t *vlan)
{
	if ((mbuf->m_pkthdr.csum_flags & CSUM_VLAN_TAG_VALID) == 0)
		return ENXIO; // No vlan tag set

	*vlan = mbuf->m_pkthdr.vlan_tag;

	return 0;
}

errno_t
mbuf_clear_vlan_tag(
	mbuf_t mbuf)
{
	mbuf->m_pkthdr.csum_flags &= ~CSUM_VLAN_TAG_VALID;
	mbuf->m_pkthdr.vlan_tag = 0;

	return 0;
}
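
/*
 * Usage sketch (hypothetical driver path): the VLAN tag travels out of band
 * in the packet header, so a transmit routine checks mbuf_get_vlan_tag() and
 * only inserts an 802.1Q header when the call does not return ENXIO. The
 * function name is invented for illustration.
 *
 *	static int
 *	example_needs_vlan_header(mbuf_t m, u_int16_t *tag)
 *	{
 *		return (mbuf_get_vlan_tag(m, tag) == 0);
 *	}
 */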

static const mbuf_csum_request_flags_t mbuf_valid_csum_request_flags =
	MBUF_CSUM_REQ_IP | MBUF_CSUM_REQ_TCP | MBUF_CSUM_REQ_UDP | MBUF_CSUM_REQ_SUM16;

errno_t
mbuf_set_csum_requested(
	mbuf_t mbuf,
	mbuf_csum_request_flags_t request,
	u_int32_t value)
{
	request &= mbuf_valid_csum_request_flags;
	mbuf->m_pkthdr.csum_flags = (mbuf->m_pkthdr.csum_flags & 0xffff0000) | request;
	mbuf->m_pkthdr.csum_data = value;

	return 0;
}

errno_t
mbuf_get_csum_requested(
	mbuf_t mbuf,
	mbuf_csum_request_flags_t *request,
	u_int32_t *value)
{
	*request = mbuf->m_pkthdr.csum_flags;
	*request &= mbuf_valid_csum_request_flags;
	if (value != NULL) {
		*value = mbuf->m_pkthdr.csum_data;
	}

	return 0;
}

errno_t
mbuf_clear_csum_requested(
	mbuf_t mbuf)
{
	mbuf->m_pkthdr.csum_flags &= 0xffff0000;
	mbuf->m_pkthdr.csum_data = 0;

	return 0;
}

static const mbuf_csum_performed_flags_t mbuf_valid_csum_performed_flags =
	MBUF_CSUM_DID_IP | MBUF_CSUM_IP_GOOD | MBUF_CSUM_DID_DATA |
	MBUF_CSUM_PSEUDO_HDR | MBUF_CSUM_TCP_SUM16;

errno_t
mbuf_set_csum_performed(
	mbuf_t mbuf,
	mbuf_csum_performed_flags_t performed,
	u_int32_t value)
{
	performed &= mbuf_valid_csum_performed_flags;
	mbuf->m_pkthdr.csum_flags = (mbuf->m_pkthdr.csum_flags & 0xffff0000) | performed;
	mbuf->m_pkthdr.csum_data = value;

	return 0;
}

errno_t
mbuf_get_csum_performed(
	mbuf_t mbuf,
	mbuf_csum_performed_flags_t *performed,
	u_int32_t *value)
{
	*performed = mbuf->m_pkthdr.csum_flags & mbuf_valid_csum_performed_flags;
	*value = mbuf->m_pkthdr.csum_data;

	return 0;
}

errno_t
mbuf_clear_csum_performed(
	mbuf_t mbuf)
{
	mbuf->m_pkthdr.csum_flags &= 0xffff0000;
	mbuf->m_pkthdr.csum_data = 0;

	return 0;
}

/*
 * Mbuf tag KPIs
 */

struct mbuf_tag_id_entry {
	SLIST_ENTRY(mbuf_tag_id_entry) next;
	mbuf_tag_id_t id;
	char string[];
};

#define MBUF_TAG_ID_ENTRY_SIZE(__str) \
	((size_t)&(((struct mbuf_tag_id_entry*)0)->string[0]) + \
	 strlen(__str) + 1)

#define MTAG_FIRST_ID 1000
static u_long mtag_id_next = MTAG_FIRST_ID;
static SLIST_HEAD(,mbuf_tag_id_entry) mtag_id_list = {NULL};
static lck_mtx_t *mtag_id_lock = NULL;

__private_extern__ void
mbuf_tag_id_first_last(
	u_long *first,
	u_long *last)
{
	*first = MTAG_FIRST_ID;
	*last = mtag_id_next - 1;
}

__private_extern__ errno_t
mbuf_tag_id_find_internal(
	const char *string,
	u_long *out_id,
	int create)
{
	struct mbuf_tag_id_entry *entry = NULL;

	if (string == NULL || out_id == NULL) {
		return EINVAL;
	}

	*out_id = 0;

	/* Don't bother allocating the lock if we're only doing a lookup */
	if (create == 0 && mtag_id_lock == NULL)
		return ENOENT;

	/* Allocate lock if necessary */
	if (mtag_id_lock == NULL) {
		lck_grp_attr_t *grp_attrib = NULL;
		lck_attr_t *lck_attrb = NULL;
		lck_grp_t *lck_group = NULL;
		lck_mtx_t *new_lock = NULL;

		grp_attrib = lck_grp_attr_alloc_init();
		lck_grp_attr_setdefault(grp_attrib);
		lck_group = lck_grp_alloc_init("mbuf_tag_allocate_id", grp_attrib);
		lck_grp_attr_free(grp_attrib);
		lck_attrb = lck_attr_alloc_init();
		lck_attr_setdefault(lck_attrb);
		lck_attr_setdebug(lck_attrb);
		new_lock = lck_mtx_alloc_init(lck_group, lck_attrb);
		if (!OSCompareAndSwap((UInt32)0, (UInt32)new_lock, (UInt32*)&mtag_id_lock)) {
			/*
			 * If the atomic swap fails, someone else has already
			 * done this work. We can free the stuff we allocated.
			 */
			lck_mtx_free(new_lock, lck_group);
			lck_grp_free(lck_group);
		}
		lck_attr_free(lck_attrb);
	}

	/* Look for an existing entry */
	lck_mtx_lock(mtag_id_lock);
	SLIST_FOREACH(entry, &mtag_id_list, next) {
		if (strcmp(string, entry->string) == 0) {
			break;
		}
	}

	if (entry == NULL) {
		if (create == 0) {
			lck_mtx_unlock(mtag_id_lock);
			return ENOENT;
		}

		entry = kalloc(MBUF_TAG_ID_ENTRY_SIZE(string));
		if (entry == NULL) {
			lck_mtx_unlock(mtag_id_lock);
			return ENOMEM;
		}

		strcpy(entry->string, string);
		entry->id = mtag_id_next;
		mtag_id_next++;
		SLIST_INSERT_HEAD(&mtag_id_list, entry, next);
	}
	lck_mtx_unlock(mtag_id_lock);

	*out_id = entry->id;

	return 0;
}

errno_t
mbuf_tag_id_find(
	const char *string,
	mbuf_tag_id_t *out_id)
{
	return mbuf_tag_id_find_internal(string, (u_long*)out_id, 1);
}

errno_t
mbuf_tag_allocate(
	mbuf_t mbuf,
	mbuf_tag_id_t id,
	mbuf_tag_type_t type,
	size_t length,
	mbuf_how_t how,
	void** data_p)
{
	struct m_tag *tag;

	if (data_p != NULL)
		*data_p = NULL;

	/* Sanity check parameters */
	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 || id < MTAG_FIRST_ID ||
		id >= mtag_id_next || length < 1 || (length & 0xffff0000) != 0 ||
		data_p == NULL) {
		return EINVAL;
	}

	/* Make sure this mtag hasn't already been allocated */
	tag = m_tag_locate(mbuf, id, type, NULL);
	if (tag != NULL) {
		return EEXIST;
	}

	/* Allocate an mtag */
	tag = m_tag_alloc(id, type, length, how);
	if (tag == NULL) {
		return how == M_WAITOK ? ENOMEM : EWOULDBLOCK;
	}

	/* Attach the mtag and set *data_p */
	m_tag_prepend(mbuf, tag);
	*data_p = tag + 1;

	return 0;
}

errno_t
mbuf_tag_find(
	mbuf_t mbuf,
	mbuf_tag_id_t id,
	mbuf_tag_type_t type,
	size_t* length,
	void** data_p)
{
	struct m_tag *tag;

	if (length != NULL)
		*length = 0;
	if (data_p != NULL)
		*data_p = NULL;

	/* Sanity check parameters */
	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 || id < MTAG_FIRST_ID ||
		id >= mtag_id_next || length == NULL || data_p == NULL) {
		return EINVAL;
	}

	/* Locate an mtag */
	tag = m_tag_locate(mbuf, id, type, NULL);
	if (tag == NULL) {
		return ENOENT;
	}

	/* Copy out the pointer to the data and the length value */
	*length = tag->m_tag_len;
	*data_p = tag + 1;

	return 0;
}

void
mbuf_tag_free(
	mbuf_t mbuf,
	mbuf_tag_id_t id,
	mbuf_tag_type_t type)
{
	struct m_tag *tag;

	if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 || id < MTAG_FIRST_ID ||
		id >= mtag_id_next)
		return;

	tag = m_tag_locate(mbuf, id, type, NULL);
	if (tag == NULL) {
		return;
	}

	m_tag_delete(mbuf, tag);
	return;
}
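
/*
 * Usage sketch (hypothetical kext): the normal tag workflow is to resolve a
 * string to an id once with mbuf_tag_id_find(), attach per-packet state with
 * mbuf_tag_allocate(), and read it back later with mbuf_tag_find(). The
 * module name, tag type, and struct below are invented for illustration.
 *
 *	#define EXAMPLE_TAG_TYPE 1
 *	struct example_state { u_int32_t cookie; };
 *
 *	static errno_t
 *	example_tag_packet(mbuf_t m, u_int32_t cookie)
 *	{
 *		mbuf_tag_id_t id;
 *		struct example_state *state;
 *		errno_t err;
 *
 *		err = mbuf_tag_id_find("com.example.filter", &id);
 *		if (err != 0)
 *			return err;
 *		err = mbuf_tag_allocate(m, id, EXAMPLE_TAG_TYPE,
 *		    sizeof(*state), MBUF_WAITOK, (void**)&state);
 *		if (err != 0)
 *			return err;		// EEXIST if this packet is already tagged
 *		state->cookie = cookie;
 *		return 0;
 *	}
 */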

/* mbuf stats */
void mbuf_stats(struct mbuf_stat *stats)
{
	stats->mbufs = mbstat.m_mbufs;
	stats->clusters = mbstat.m_clusters;
	stats->clfree = mbstat.m_clfree;
	stats->drops = mbstat.m_drops;
	stats->wait = mbstat.m_wait;
	stats->drain = mbstat.m_drain;
	__builtin_memcpy(stats->mtypes, mbstat.m_mtypes, sizeof(stats->mtypes));
	stats->mcfail = mbstat.m_mcfail;
	stats->mpfail = mbstat.m_mpfail;
	stats->msize = mbstat.m_msize;
	stats->mclbytes = mbstat.m_mclbytes;
	stats->minclsize = mbstat.m_minclsize;
	stats->mlen = mbstat.m_mlen;
	stats->mhlen = mbstat.m_mhlen;
	stats->bigclusters = mbstat.m_bigclusters;
	stats->bigclfree = mbstat.m_bigclfree;
	stats->bigmclbytes = mbstat.m_bigmclbytes;
}

errno_t
mbuf_allocpacket(mbuf_how_t how, size_t packetlen, unsigned int *maxchunks, mbuf_t *mbuf)
{
	errno_t error;
	struct mbuf *m;
	unsigned int numpkts = 1;
	unsigned int numchunks = maxchunks ? *maxchunks : 0;

	if (packetlen == 0) {
		error = EINVAL;
		goto out;
	}
	m = m_allocpacket_internal(&numpkts, packetlen, maxchunks ? &numchunks : NULL, how, 1, 0);
	if (m == 0) {
		if (maxchunks && *maxchunks && numchunks > *maxchunks)
			error = ENOBUFS;
		else
			error = ENOMEM;
	} else {
		error = 0;
		*mbuf = m;
	}
out:
	return error;
}


/*
 * mbuf_copyback differs from m_copyback in a few ways:
 * 1) mbuf_copyback will allocate clusters for new mbufs we append
 * 2) mbuf_copyback will grow the last mbuf in the chain if possible
 * 3) mbuf_copyback reports whether or not the operation succeeded
 * 4) mbuf_copyback allows the caller to specify M_WAITOK or M_NOWAIT
 */
errno_t
mbuf_copyback(
	mbuf_t m,
	size_t off,
	size_t len,
	const void *data,
	mbuf_how_t how)
{
	size_t mlen;
	mbuf_t m_start = m;
	mbuf_t n;
	int totlen = 0;
	errno_t result = 0;
	const char *cp = data;

	if (m == NULL || len == 0 || data == NULL)
		return EINVAL;

	while (off > (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(how, m->m_type);
			if (n == 0) {
				result = ENOBUFS;
				goto out;
			}
			n->m_len = MIN(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}

	while (len > 0) {
		mlen = MIN(m->m_len - off, len);
		if (mlen < len && m->m_next == NULL && mbuf_trailingspace(m) > 0) {
			size_t grow = MIN(mbuf_trailingspace(m), len - mlen);
			mlen += grow;
			m->m_len += grow;
		}
		bcopy(cp, off + (char*)mbuf_data(m), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == 0) {
			n = m_get(how, m->m_type);
			if (n == NULL) {
				result = ENOBUFS;
				goto out;
			}
			if (len > MINCLSIZE) {
				/* cluster allocation failure is okay, we can grow chain */
				mbuf_mclget(how, m->m_type, &n);
			}
			n->m_len = MIN(mbuf_maxlen(n), len);
			m->m_next = n;
		}
		m = m->m_next;
	}

out:
	if ((m_start->m_flags & M_PKTHDR) && (m_start->m_pkthdr.len < totlen))
		m_start->m_pkthdr.len = totlen;

	return result;
}
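
/*
 * Usage sketch (hypothetical): overwriting bytes at an arbitrary offset in a
 * chain with mbuf_copyback(), which, unlike m_copyback, grows the chain as
 * needed and reports failure instead of silently truncating. The function
 * name is invented for illustration.
 *
 *	static errno_t
 *	example_patch_bytes(mbuf_t m, size_t offset, const void *bytes, size_t len)
 *	{
 *		return mbuf_copyback(m, offset, len, bytes, MBUF_WAITOK);
 *	}
 */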