/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1999 Apple Computer, Inc. All rights reserved.
 *
 * HISTORY
 *
 * 1999 Mar 29 rsulack created.
 */

#include <mach/mach_types.h>
#include <mach/vm_types.h>
#include <mach/kern_return.h>
#include <kern/kern_types.h>
#include <vm/vm_kern.h>
#include <kern/thread.h>

#include <mach_host.h>

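/*
 * Head of the singly-linked list of registered kernel modules, and the
 * next module id to hand out.  Both are protected by kmod_lock; the
 * command queue declared below is protected by kmod_queue_lock.
 */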
kmod_info_t *kmod = 0;
static int kmod_index = 1;

decl_simple_lock_data(,kmod_lock)
decl_simple_lock_data(,kmod_queue_lock)

typedef struct cmd_queue_entry {
    queue_chain_t    links;
    vm_address_t     data;
    vm_size_t        size;
} cmd_queue_entry_t;

queue_head_t kmod_cmd_queue;

void
kmod_init()
{
    simple_lock_init(&kmod_lock, ETAP_MISC_Q);
    simple_lock_init(&kmod_queue_lock, ETAP_MISC_Q);
    queue_init(&kmod_cmd_queue);
}

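/*
 * Linear lookups over the module list.  Neither routine takes kmod_lock
 * itself; callers that can race with list updates hold the lock across
 * the walk (the panic-time kmod_dump() below is the exception).
 */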
kmod_info_t *
kmod_lookupbyid(kmod_t id)
{
    kmod_info_t *k = 0;

    k = kmod;
    while (k) {
        if (k->id == id) break;
        k = k->next;
    }

    return k;
}

kmod_info_t *
kmod_lookupbyname(char * name)
{
    kmod_info_t *k = 0;

    k = kmod;
    while (k) {
        if (!strcmp(k->name, name)) break;
        k = k->next;
    }

    return k;
}

// XXX add a nocopy flag??

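/*
 * Copy a command packet into a fresh kernel buffer, enqueue it, and wake
 * one thread blocked in kmod_control(KMOD_CNTL_GET_CMD).  The caller's
 * buffer is not consumed; it may be freed once this returns.
 */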
kern_return_t
kmod_queue_cmd(vm_address_t data, vm_size_t size)
{
    kern_return_t rc;
    cmd_queue_entry_t *e = (cmd_queue_entry_t *)kalloc(sizeof(struct cmd_queue_entry));
    if (!e) return KERN_RESOURCE_SHORTAGE;

    rc = kmem_alloc(kernel_map, &e->data, size);
    if (rc != KERN_SUCCESS) {
        kfree((vm_offset_t)e, sizeof(struct cmd_queue_entry));
        return rc;
    }
    e->size = size;
    bcopy((void *)data, (void *)e->data, size);

    simple_lock(&kmod_queue_lock);
    enqueue_tail(&kmod_cmd_queue, (queue_entry_t)e);
    simple_unlock(&kmod_queue_lock);

    thread_wakeup_one((event_t)&kmod_cmd_queue);

    return KERN_SUCCESS;
}

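/*
 * Queue a load request for the named extension.  Since kmod_queue_cmd()
 * copies the packet into its own buffer, the packet allocated here is
 * freed before returning rather than leaked.
 */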
kern_return_t
kmod_load_extension(char *name)
{
    kmod_load_extension_cmd_t *data;
    vm_size_t size;
    kern_return_t rc;

    size = sizeof(kmod_load_extension_cmd_t);
    data = (kmod_load_extension_cmd_t *)kalloc(size);
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = KMOD_LOAD_EXTENSION_PACKET;
    strncpy(data->name, name, KMOD_MAX_NAME);

    rc = kmod_queue_cmd((vm_address_t)data, size);
    kfree((vm_offset_t)data, size);    // kmod_queue_cmd() made its own copy
    return rc;
}

kern_return_t
kmod_load_extension_with_dependencies(char *name, char **dependencies)
{
    kmod_load_with_dependencies_cmd_t *data;
    vm_size_t size;
    kern_return_t rc;
    char **c;
    int i, count = 0;

    // count the NULL-terminated dependency list
    c = dependencies;
    if (c) {
        while (*c) {
            count++; c++;
        }
    }
    size = sizeof(int) + KMOD_MAX_NAME * (count + 1) + 1;
    data = (kmod_load_with_dependencies_cmd_t *)kalloc(size);
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = KMOD_LOAD_WITH_DEPENDENCIES_PACKET;
    strncpy(data->name, name, KMOD_MAX_NAME);

    c = dependencies;
    for (i = 0; i < count; i++) {
        strncpy(data->dependencies[i], *c, KMOD_MAX_NAME);
        c++;
    }
    data->dependencies[count][0] = 0;    // empty name terminates the list

    rc = kmod_queue_cmd((vm_address_t)data, size);
    kfree((vm_offset_t)data, size);    // kmod_queue_cmd() made its own copy
    return rc;
}

kern_return_t
kmod_send_generic(int type, void *generic_data, int size)
{
    kmod_generic_cmd_t *data;
    kern_return_t rc;

    data = (kmod_generic_cmd_t *)kalloc(size + sizeof(int));
    if (!data) return KERN_RESOURCE_SHORTAGE;

    data->type = type;
    bcopy(generic_data, data->data, size);    // copy the caller's payload into the packet

    rc = kmod_queue_cmd((vm_address_t)data, size + sizeof(int));
    kfree((vm_offset_t)data, size + sizeof(int));    // kmod_queue_cmd() made its own copy
    return rc;
}

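/*
 * Register a freshly loaded module: wire its non-header pages, verify
 * the name is not already in use, link the info record at the head of
 * the module list, and hand back the newly assigned id.
 */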
kern_return_t
kmod_create_internal(kmod_info_t *info, kmod_t *id)
{
    kern_return_t rc;

    if (!info) return KERN_INVALID_ADDRESS;

    // double check for page alignment
    if ((info->address | info->hdr_size) & (PAGE_SIZE - 1)) {
        return KERN_INVALID_ADDRESS;
    }

    rc = vm_map_wire(kernel_map, info->address + info->hdr_size,
                     info->address + info->size, VM_PROT_DEFAULT, FALSE);
    if (rc != KERN_SUCCESS) {
        return rc;
    }

    simple_lock(&kmod_lock);

    // check to see if already loaded
    if (kmod_lookupbyname(info->name)) {
        simple_unlock(&kmod_lock);
        rc = vm_map_unwire(kernel_map, info->address + info->hdr_size,
                           info->address + info->size, FALSE);
        assert(rc == KERN_SUCCESS);
        return KERN_INVALID_ARGUMENT;
    }

    info->id = kmod_index++;
    info->reference_count = 0;

    info->next = kmod;
    kmod = info;

    *id = info->id;

    simple_unlock(&kmod_lock);

    printf("kmod_create: %s (id %d), %d pages loaded at 0x%x, header size 0x%x\n",
           info->name, info->id, info->size / PAGE_SIZE, info->address, info->hdr_size);

    return KERN_SUCCESS;
}


kern_return_t
kmod_create(host_priv_t host_priv,
            kmod_info_t *info,
            kmod_t *id)
{
    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;
    return kmod_create_internal(info, id);
}

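/*
 * Register a fake entry (name and version only; no code or address
 * range) for something that is not actually a loadable module.  The
 * entry starts with a reference count of 1 so it can never be started,
 * stopped, or unloaded.
 */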
kern_return_t
kmod_create_fake(char *name, char *version)
{
    kmod_info_t *info;

    info = (kmod_info_t *)kalloc(sizeof(kmod_info_t));
    if (!info) {
        return KERN_RESOURCE_SHORTAGE;
    }

    // make the fake entry
    info->info_version = KMOD_INFO_VERSION;
    bcopy(name, info->name, KMOD_MAX_NAME);
    bcopy(version, info->version, KMOD_MAX_NAME);
    info->reference_count = 1;    // keep it from unloading, starting, stopping
    info->reference_list = 0;
    info->address = info->size = info->hdr_size = 0;
    info->start = info->stop = 0;

    simple_lock(&kmod_lock);

    // check to see if already "loaded"
    if (kmod_lookupbyname(info->name)) {
        simple_unlock(&kmod_lock);
        kfree((vm_offset_t)info, sizeof(kmod_info_t));    // don't leak the entry
        return KERN_INVALID_ARGUMENT;
    }

    info->id = kmod_index++;

    info->next = kmod;
    kmod = info;

    simple_unlock(&kmod_lock);

    return KERN_SUCCESS;
}

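/*
 * Unload a module: unlink it from the list, drop the references it holds
 * on other modules, then unwire and release its memory.  Fails if anyone
 * still holds a reference on the module itself.
 */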
kern_return_t
kmod_destroy_internal(kmod_t id)
{
    kern_return_t rc;
    kmod_info_t *k;
    kmod_info_t *p;

    simple_lock(&kmod_lock);

    k = p = kmod;
    while (k) {
        if (k->id == id) {
            kmod_reference_t *r, *t;

            if (k->reference_count != 0) {
                simple_unlock(&kmod_lock);
                return KERN_INVALID_ARGUMENT;
            }

            if (k == p) {    // first element
                kmod = k->next;
            } else {
                p->next = k->next;
            }
            simple_unlock(&kmod_lock);

            r = k->reference_list;
            while (r) {
                r->info->reference_count--;
                t = r;
                r = r->next;
                kfree((vm_offset_t)t, sizeof(struct kmod_reference));
            }

            printf("kmod_destroy: %s (id %d), deallocating %d pages starting at 0x%x\n",
                   k->name, k->id, k->size / PAGE_SIZE, k->address);

            rc = vm_map_unwire(kernel_map, k->address + k->hdr_size,
                               k->address + k->size, FALSE);
            assert(rc == KERN_SUCCESS);

            rc = vm_deallocate(kernel_map, k->address, k->size);
            assert(rc == KERN_SUCCESS);

            return KERN_SUCCESS;
        }
        p = k;
        k = k->next;
    }

    simple_unlock(&kmod_lock);

    return KERN_INVALID_ARGUMENT;
}


kern_return_t
kmod_destroy(host_priv_t host_priv,
             kmod_t id)
{
    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;
    return kmod_destroy_internal(id);
}


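/*
 * Invoke a module's start or stop entry point.  A module that is still
 * referenced by others may not be started or stopped.  Any argument data
 * the caller sent is copied out of its vm_map_copy_t, handed to the
 * entry point, and then deallocated; *data and *dataCount are consumed
 * either way.
 */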
kern_return_t
kmod_start_or_stop(
    kmod_t id,
    int start,
    kmod_args_t *data,
    mach_msg_type_number_t *dataCount)
{
    kern_return_t rc = KERN_SUCCESS;
    void * user_data = 0;
    kern_return_t (*func)();
    kmod_info_t *k;

    simple_lock(&kmod_lock);

    k = kmod_lookupbyid(id);
    if (!k || k->reference_count) {
        simple_unlock(&kmod_lock);
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    if (start) {
        func = (void *)k->start;
    } else {
        func = (void *)k->stop;
    }

    simple_unlock(&kmod_lock);

    //
    // call kmod entry point
    //
    if (data && dataCount && *data && *dataCount) {
        rc = vm_map_copyout(kernel_map, (vm_offset_t *)&user_data, (vm_map_copy_t)*data);
        if (rc != KERN_SUCCESS) goto finish;    // don't call the entry point with bogus data
    }

    rc = (*func)(k, user_data);

finish:

    if (user_data) {
        (void) vm_deallocate(kernel_map, (vm_offset_t)user_data, *dataCount);
    }
    if (data) *data = 0;
    if (dataCount) *dataCount = 0;

    return rc;
}


/*
 * The retain and release calls take no user data, but the caller
 * may have sent some in error (the MIG definition allows it).
 * If this is the case, they will just return that same data
 * right back to the caller (since they never touch the *data and
 * *dataCount fields).
 */
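/*
 * The kmod_t passed to retain and release packs two module ids into one
 * value; the KMOD_UNPACK_FROM_ID()/KMOD_UNPACK_TO_ID() macros used below
 * recover the referencing ("from") and referenced ("to") modules.
 * mach/kmod.h provides a matching packing macro for clients.
 */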
kern_return_t
kmod_retain(kmod_t id)
{
    kern_return_t rc = KERN_SUCCESS;

    kmod_info_t *t;    // reference to
    kmod_info_t *f;    // reference from
    kmod_reference_t *r = 0;

    r = (kmod_reference_t *)kalloc(sizeof(struct kmod_reference));
    if (!r) {
        rc = KERN_RESOURCE_SHORTAGE;
        goto finish;
    }

    simple_lock(&kmod_lock);

    t = kmod_lookupbyid(KMOD_UNPACK_TO_ID(id));
    f = kmod_lookupbyid(KMOD_UNPACK_FROM_ID(id));
    if (!t || !f) {
        simple_unlock(&kmod_lock);
        if (r) kfree((vm_offset_t)r, sizeof(struct kmod_reference));
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    r->next = f->reference_list;
    r->info = t;
    f->reference_list = r;
    t->reference_count++;

    simple_unlock(&kmod_lock);

finish:

    return rc;
}


kern_return_t
kmod_release(kmod_t id)
{
    kern_return_t rc = KERN_INVALID_ARGUMENT;

    kmod_info_t *t;    // reference to
    kmod_info_t *f;    // reference from
    kmod_reference_t *r = 0;
    kmod_reference_t *p;

    simple_lock(&kmod_lock);

    t = kmod_lookupbyid(KMOD_UNPACK_TO_ID(id));
    f = kmod_lookupbyid(KMOD_UNPACK_FROM_ID(id));
    if (!t || !f) {
        simple_unlock(&kmod_lock);    // don't leave the lock held on the error path
        rc = KERN_INVALID_ARGUMENT;
        goto finish;
    }

    p = r = f->reference_list;
    while (r) {
        if (r->info == t) {
            if (p == r) {    // first element
                f->reference_list = r->next;
            } else {
                p->next = r->next;
            }
            r->info->reference_count--;

            simple_unlock(&kmod_lock);
            kfree((vm_offset_t)r, sizeof(struct kmod_reference));
            rc = KERN_SUCCESS;
            goto finish;
        }
        p = r;
        r = r->next;
    }

    simple_unlock(&kmod_lock);

finish:

    return rc;
}


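/*
 * MIG entry point for host-privileged kmod control: dispatches start,
 * stop, retain, and release requests, and lets the user-level loader
 * block waiting for the next queued command (KMOD_CNTL_GET_CMD).
 * A user-level client would typically call, e.g. (hypothetical sketch):
 *
 *     kmod_args_t data = 0;
 *     mach_msg_type_number_t count = 0;
 *     kern_return_t kr = kmod_control(host_priv, id, KMOD_CNTL_START,
 *                                     &data, &count);
 */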
kern_return_t
kmod_control(host_priv_t host_priv,
             kmod_t id,
             kmod_control_flavor_t flavor,
             kmod_args_t *data,
             mach_msg_type_number_t *dataCount)
{
    kern_return_t rc = KERN_SUCCESS;

    if (host_priv == HOST_PRIV_NULL) return KERN_INVALID_HOST;

    switch (flavor) {

    case KMOD_CNTL_START:
    case KMOD_CNTL_STOP:
    {
        rc = kmod_start_or_stop(id, (flavor == KMOD_CNTL_START),
                                data, dataCount);
        break;
    }

    case KMOD_CNTL_RETAIN:
    {
        rc = kmod_retain(id);
        break;
    }

    case KMOD_CNTL_RELEASE:
    {
        rc = kmod_release(id);
        break;
    }

    case KMOD_CNTL_GET_CMD: {

        cmd_queue_entry_t *e;

        /*
         * Throw away any data the user may have sent in error.
         * We must do this, because we are likely to return
         * some data for these commands (thus causing a leak of
         * whatever data the user sent us in error).
         */
        if (*data && *dataCount) {
            vm_map_copy_discard(*data);
            *data = 0;
            *dataCount = 0;
        }

        simple_lock(&kmod_queue_lock);

        if (queue_empty(&kmod_cmd_queue)) {
            assert_wait((event_t)&kmod_cmd_queue, THREAD_ABORTSAFE);
            simple_unlock(&kmod_queue_lock);
            thread_block((void(*)(void))0);
            simple_lock(&kmod_queue_lock);
            if (queue_empty(&kmod_cmd_queue)) {
                // we must have been interrupted!
                simple_unlock(&kmod_queue_lock);
                return KERN_ABORTED;
            }
        }
        e = (cmd_queue_entry_t *)dequeue_head(&kmod_cmd_queue);

        simple_unlock(&kmod_queue_lock);

        rc = vm_map_copyin(kernel_map, e->data, e->size, TRUE, (vm_map_copy_t *)data);
        if (rc) {
            // put the command back so it isn't lost
            simple_lock(&kmod_queue_lock);
            enqueue_head(&kmod_cmd_queue, (queue_entry_t)e);
            simple_unlock(&kmod_queue_lock);
            *data = 0;
            *dataCount = 0;
            return rc;
        }
        *dataCount = e->size;

        kfree((vm_offset_t)e, sizeof(struct cmd_queue_entry));

        break;
    }

    default:
        rc = KERN_INVALID_ARGUMENT;
    }

    return rc;
}


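/*
 * Snapshot the module list for user space.  The list is sized in one
 * pass, copied under the lock in a second, and the whole operation is
 * retried from scratch if the list grew in between.
 */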
kern_return_t
kmod_get_info(host_t host,
              kmod_info_array_t *kmods,
              mach_msg_type_number_t *kmodCount)
{
    vm_offset_t data;
    kmod_info_t *k, *p1;
    kmod_reference_t *r, *p2;
    int ref_count;
    unsigned size = 0;
    kern_return_t rc = KERN_SUCCESS;

    *kmods = (void *)0;
    *kmodCount = 0;

retry:
    simple_lock(&kmod_lock);
    size = 0;
    k = kmod;
    while (k) {
        size += sizeof(kmod_info_t);
        r = k->reference_list;
        while (r) {
            size += sizeof(kmod_reference_t);
            r = r->next;
        }
        k = k->next;
    }
    simple_unlock(&kmod_lock);
    if (!size) return KERN_SUCCESS;

    rc = kmem_alloc(kernel_map, &data, size);
    if (rc) return rc;

    // copy kmod into data, retry if kmod's size has changed (grown)
    // the copied out data is tweaked so user level can figure out what's what:
    // the copied out k->next pointers are changed to point to themselves
    // (i.e. to the module's own kernel address), k->reference_list is changed
    // into a count, and the references are tacked onto the end of the data
    // packet in the order they are found

    simple_lock(&kmod_lock);
    k = kmod; p1 = (kmod_info_t *)data;
    while (k) {
        if ((p1 + 1) > (kmod_info_t *)(data + size)) {
            simple_unlock(&kmod_lock);
            kmem_free(kernel_map, data, size);
            goto retry;
        }

        *p1 = *k;
        if (k->next) p1->next = k;
        p1++; k = k->next;
    }

    p2 = (kmod_reference_t *)p1;
    k = kmod; p1 = (kmod_info_t *)data;
    while (k) {
        r = k->reference_list; ref_count = 0;
        while (r) {
            if ((p2 + 1) > (kmod_reference_t *)(data + size)) {
                simple_unlock(&kmod_lock);
                kmem_free(kernel_map, data, size);
                goto retry;
            }
            // note the last 'k' in the chain has its next == 0
            // since there can only be one like that,
            // this case is handled by the caller
            *p2 = *r;
            p2++; r = r->next; ref_count++;
        }
        p1->reference_list = (kmod_reference_t *)ref_count;
        p1++; k = k->next;
    }
    simple_unlock(&kmod_lock);

    rc = vm_map_copyin(kernel_map, data, size, TRUE, (vm_map_copy_t *)kmods);
    if (rc) {
        kmem_free(kernel_map, data, size);
        *kmods = 0;
        *kmodCount = 0;
        return rc;
    }
    *kmodCount = size;

    return KERN_SUCCESS;
}

#include <mach-o/loader.h>

extern void *getsectdatafromheader(struct mach_header *mhp,
                                   const char *segname,
                                   const char *sectname,
                                   int *size);

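/*
 * Run the function pointers recorded in a named section of the module's
 * __TEXT segment; this is how static C++ constructors and destructors
 * are invoked on load and unload (see the two wrappers below).
 */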
static kern_return_t
kmod_call_funcs_in_section(struct mach_header *header, const char *sectName)
{
    typedef void (*Routine)(void);
    Routine *routines;
    int size, i;

    if (header->magic != MH_MAGIC) {
        return KERN_INVALID_ARGUMENT;
    }

    routines = (Routine *)getsectdatafromheader(header, SEG_TEXT, sectName, &size);
    if (!routines) return KERN_SUCCESS;

    size /= sizeof(Routine);
    for (i = 0; i < size; i++) {
        (*routines[i])();
    }

    return KERN_SUCCESS;
}

kern_return_t
kmod_initialize_cpp(kmod_info_t *info)
{
    return kmod_call_funcs_in_section((struct mach_header *)info->address, "__constructor");
}

kern_return_t
kmod_finalize_cpp(kmod_info_t *info)
{
    return kmod_call_funcs_in_section((struct mach_header *)info->address, "__destructor");
}

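/*
 * Default start/stop entry points, for modules that don't supply their
 * own.
 */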
kern_return_t
kmod_default_start(struct kmod_info *ki, void *data)
{
    return KMOD_RETURN_SUCCESS;
}

kern_return_t
kmod_default_stop(struct kmod_info *ki, void *data)
{
    return KMOD_RETURN_SUCCESS;
}

#define IS_IN_BACKTRACE 0xdeadbeef
#define IS_A_DEPENDENCY 0xbeefdead

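/*
 * Panic-time dump: given a backtrace, mark (by temporarily overloading
 * the info_version field with the sentinels above) every module whose
 * text contains a backtrace address, print those modules and the modules
 * they reference, then restore the fields.  Every walk first checks with
 * pmap_extract() that the page it is about to read is actually mapped,
 * since nothing can be trusted during a panic.
 */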
void
kmod_dump(vm_offset_t *addr, unsigned int cnt)
{
    kmod_info_t *k;
    kmod_reference_t *r;
    int i, found_one = 0;

    // find backtrace addresses that are inside a kmod
    for (i = 0; i < cnt; i++, addr++) {
        k = kmod;
        while (k) {
            // XXX - validate page(s) that k points to
            if (pmap_extract(kernel_pmap, (vm_offset_t)k) == 0) {    /* Exit loop if page not mapped */
                printf("kmod scan stopped due to missing page: %08X\n", k);
                break;
            }
            if ((*addr >= k->address) && (*addr < (k->address + k->size))) {
                // got one, blast info_version, we don't need it at this point
                k->info_version = IS_IN_BACKTRACE;
                found_one++;
                break;
            }
            k = k->next;
        }
    }
    if (!found_one) return;

    printf("kernel modules in backtrace: ");
    k = kmod;
    while (k) {
        if (pmap_extract(kernel_pmap, (vm_offset_t)k) == 0) {    /* Exit loop if page not mapped */
            printf("kmod scan stopped due to missing page: %08X\n", k);
            break;
        }
        if (k->info_version == IS_IN_BACKTRACE) {
            printf("%s(%s)@0x%x ", k->name, k->version, k->address);
        }
        k = k->next;
    }
    printf("\n");

    // look for dependencies
    k = kmod; found_one = 0;
    while (k) {
        if (pmap_extract(kernel_pmap, (vm_offset_t)k) == 0) {    /* Exit loop if page not mapped */
            printf("kmod dependency scan stopped due to missing page: %08X\n", k);
            break;
        }
        if (k->info_version == IS_IN_BACKTRACE) {
            r = k->reference_list;
            while (r) {
                // XXX - validate page(s) that r and r->info point to
                if (pmap_extract(kernel_pmap, (vm_offset_t)r) == 0) {    /* Exit loop if page not mapped */
                    printf("kmod validation scan stopped due to missing page: %08X\n", r);
                    break;
                }
                if (r->info->info_version != IS_IN_BACKTRACE) {
                    r->info->info_version = IS_A_DEPENDENCY;
                    found_one++;
                }
                r = r->next;
            }
        }
        k = k->next;
    }
    if (!found_one) goto cleanup;

    printf("kernel module dependencies: ");
    k = kmod;
    while (k) {
        if (pmap_extract(kernel_pmap, (vm_offset_t)k) == 0) {    /* Exit loop if page not mapped */
            printf("kmod dependency print stopped due to missing page: %08X\n", k);
            break;
        }
        if (k->info_version == IS_A_DEPENDENCY) {
            printf("%s(%s)@0x%x ", k->name, k->version, k->address);
        }
        k = k->next;
    }
    printf("\n");

cleanup:
    // in case we double panic
    k = kmod;
    while (k) {
        if (pmap_extract(kernel_pmap, (vm_offset_t)k) == 0) {    /* Exit loop if page not mapped */
            printf("kmod dump cleanup stopped due to missing page: %08X\n", k);
            break;
        }
        k->info_version = KMOD_INFO_VERSION;
        k = k->next;
    }
}