/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
28 #include <zone_debug.h>
29 #include <mach/boolean.h>
30 #include <mach/kern_return.h>
31 #include <mach/mig_errors.h>
32 #include <mach/port.h>
33 #include <mach/vm_param.h>
34 #include <mach/notify.h>
35 //#include <mach/mach_host_server.h>
36 #include <mach/mach_types.h>
38 #include <machine/machparam.h> /* spl definitions */
40 #include <ipc/ipc_port.h>
41 #include <ipc/ipc_space.h>
43 #include <kern/clock.h>
45 #include <kern/counters.h>
46 #include <kern/queue.h>
47 #include <kern/zalloc.h>
48 #include <kern/thread.h>
49 #include <kern/task.h>
50 #include <kern/sched_prim.h>
51 #include <kern/misc_protos.h>
54 #include <vm/vm_map.h>
55 #include <vm/vm_kern.h>
57 #include <device/device_types.h>
58 #include <device/device_port.h>
59 #include <device/device_server.h>
61 #include <machine/machparam.h>
63 #if defined(__i386__) || defined(__x86_64__)
64 #include <i386/pmap.h>
66 #include <IOKit/IOTypes.h>
/*
 * Functions in iokit:IOUserClient.cpp
 *
 * NOTE(review): this declaration region was damaged in extraction; one
 * prototype below is visibly truncated.  Verify against the canonical
 * header before relying on it.
 */

/* Take an additional reference on an IOKit object. */
extern void iokit_add_reference( io_object_t obj );

/* Take an additional reference on a user-client (connect) object. */
extern void iokit_add_connect_reference( io_object_t obj );

/* Produce the kernel port representing `obj` with kobject type `type`. */
extern ipc_port_t iokit_port_for_object( io_object_t obj,
	ipc_kobject_type_t type );

/* Notify IOKit that all send rights to a client's port have been dropped. */
extern kern_return_t iokit_client_died( io_object_t obj,
	ipc_port_t port, ipc_kobject_type_t type, mach_port_mscount_t * mscount );

/* NOTE(review): the declaration below is truncated in this copy — its
 * storage class, return type and trailing parameters are missing. */
iokit_client_memory_for_type(
	vm_address_t * address,

/* Highest physical page number known to the pmap layer. */
extern ppnum_t IOGetLastPageNumber(void);
/*
 * Functions imported by iokit:IOUserClient.cpp
 * (implemented later in this file).
 */

/* Allocate a kernel port bound to `obj` with kobject type `type`. */
extern ipc_port_t iokit_alloc_object_port( io_object_t obj,
	ipc_kobject_type_t type );

/* Unbind and deallocate the kernel port for an object. */
extern kern_return_t iokit_destroy_object_port( ipc_port_t port );

/* Copy a send right for `obj` into `task`'s space; returns the name. */
extern mach_port_name_t iokit_make_send_right( task_t task,
	io_object_t obj, ipc_kobject_type_t type );

/* Adjust the send-right refcount for `name` in `task`'s space. */
extern kern_return_t iokit_mod_send_right( task_t task, mach_port_name_t name, mach_port_delta_t delta );

/* Translate a user connect handle in `task`'s space to an object ref. */
extern io_object_t iokit_lookup_connect_ref(io_object_t clientRef, ipc_space_t task );

/* Same as above, against the current task's space. */
extern io_object_t iokit_lookup_connect_ref_current_task(io_object_t clientRef );

/* Port reference-count helpers. */
extern void iokit_retain_port( ipc_port_t port );
extern void iokit_release_port( ipc_port_t port );
extern void iokit_release_port_send( ipc_port_t port );

/* Object<->port binding lock (single global mutex; see definitions below). */
extern void iokit_lock_port(ipc_port_t port );
extern void iokit_unlock_port(ipc_port_t port );

/* Rebind an existing port to a new object/type. */
extern kern_return_t iokit_switch_object_port( ipc_port_t port, io_object_t obj, ipc_kobject_type_t type );
/*
 * Functions imported by iokit:IOMemoryDescriptor.cpp
 * (implemented later in this file).
 */

/* Map physical range `pa`..`pa+length` at `va` in `map` with `mapFlags`. */
extern kern_return_t IOMapPages(vm_map_t map, mach_vm_address_t va, mach_vm_address_t pa,
	mach_vm_size_t length, unsigned int mapFlags );

/* Remove mappings covering `va`..`va+length` from `map`. */
extern kern_return_t IOUnmapPages(vm_map_t map, mach_vm_address_t va, mach_vm_size_t length );

/* Re-enter existing mappings with new protection/cache attributes. */
extern kern_return_t IOProtectCacheMode(vm_map_t map, mach_vm_address_t va,
	mach_vm_size_t length, unsigned int options );

/* Default WIMG cache bits for the physical address `pa`. */
extern unsigned int IODefaultCacheBits(addr64_t pa );
/*
 * Lookup a device by its port.
 * Doesn't consume the naked send right; produces a device reference.
 *
 * NOTE(review): body is incomplete in this copy — the parameter list,
 * local declarations, the inactive/wrong-type path and the final return
 * of `obj` are missing.
 */
MIGEXTERN io_object_t
iokit_lookup_object_port(
	/* ... lines missing in this copy ... */
	iokit_lock_port(port);
	/* Only hand back an object if the port is live and its kobject is an
	 * IOKit object (as opposed to, e.g., a connect port). */
	if (ip_active(port) && (ip_kotype(port) == IKOT_IOKIT_OBJECT)) {
		obj = (io_object_t) port->ip_kobject;
		iokit_add_reference( obj );
	/* ... lines missing in this copy ... */
	iokit_unlock_port(port);
/*
 * Lookup a connect (user-client) object by its port; takes a connect
 * reference on success.
 *
 * NOTE(review): body is incomplete in this copy — the parameter list,
 * local declarations, the failure path and the final return are missing.
 */
MIGEXTERN io_object_t
iokit_lookup_connect_port(
	/* ... lines missing in this copy ... */
	iokit_lock_port(port);
	/* Require a live port whose kobject type is a user-client connection. */
	if (ip_active(port) && (ip_kotype(port) == IKOT_IOKIT_CONNECT)) {
		obj = (io_object_t) port->ip_kobject;
		iokit_add_connect_reference( obj );
	/* ... lines missing in this copy ... */
	iokit_unlock_port(port);
/*
 * Translate a user-space connect handle (a port name smuggled through an
 * io_object_t) in `space` into a kernel object reference.  Takes a
 * connect reference on success; returns NULL otherwise.
 *
 * NOTE(review): body is incomplete in this copy — the return-type line,
 * the declarations of `kr` and `port`, port unlock/release on the
 * failure paths and the final return of `obj` are missing.
 */
iokit_lookup_connect_ref(io_object_t connectRef, ipc_space_t space)
	io_object_t obj = NULL;

	/* Reject NULL and invalid port names up front. */
	if (connectRef && MACH_PORT_VALID(CAST_MACH_PORT_TO_NAME(connectRef))) {
		/* Resolve the name to an ipc_object in the caller's space. */
		kr = ipc_object_translate(space, CAST_MACH_PORT_TO_NAME(connectRef), MACH_PORT_RIGHT_SEND, (ipc_object_t *)&port);
		if (kr == KERN_SUCCESS) {
			assert(IP_VALID(port));
			iokit_lock_port(port);
			/* Only live connect ports yield an object. */
			if (ip_active(port) && (ip_kotype(port) == IKOT_IOKIT_CONNECT)) {
				obj = (io_object_t) port->ip_kobject;
				iokit_add_connect_reference(obj);
			/* ... lines missing in this copy ... */
			iokit_unlock_port(port);
216 iokit_lookup_connect_ref_current_task(io_object_t connectRef
)
218 return iokit_lookup_connect_ref(connectRef
, current_space());
222 iokit_retain_port( ipc_port_t port
)
224 ipc_port_reference( port
);
228 iokit_release_port( ipc_port_t port
)
230 ipc_port_release( port
);
234 iokit_release_port_send( ipc_port_t port
)
236 ipc_port_release_send( port
);
239 extern lck_mtx_t iokit_obj_to_port_binding_lock
;
242 iokit_lock_port( __unused ipc_port_t port
)
244 lck_mtx_lock(&iokit_obj_to_port_binding_lock
);
248 iokit_unlock_port( __unused ipc_port_t port
)
250 lck_mtx_unlock(&iokit_obj_to_port_binding_lock
);
/*
 * Get the port for a device.
 * Consumes a device reference; produces a naked send right.
 *
 * NOTE(review): body is incomplete in this copy — the return-type line,
 * the parameter list, local declarations, the NULL-object guard and the
 * final return of the send right are missing.
 */
iokit_make_object_port(
	/* ... lines missing in this copy ... */
	port = iokit_port_for_object( obj, IKOT_IOKIT_OBJECT );
	/* ... lines missing in this copy ... */
	/* Convert the port reference into a naked send right, then drop the
	 * reference iokit_port_for_object() produced. */
	sendPort = ipc_port_make_send( port );
	iokit_release_port( port );
	/* ... lines missing in this copy ... */
	/* Consume the caller's device reference. */
	iokit_remove_reference( obj );
/*
 * Get the port for a user-client connection; same contract as
 * iokit_make_object_port() but for IKOT_IOKIT_CONNECT ports: consumes a
 * reference on `obj`, produces a naked send right.
 *
 * NOTE(review): body is incomplete in this copy — the return-type line,
 * the parameter list, local declarations, the NULL-object guard and the
 * final return of the send right are missing.
 */
iokit_make_connect_port(
	/* ... lines missing in this copy ... */
	port = iokit_port_for_object( obj, IKOT_IOKIT_CONNECT );
	/* ... lines missing in this copy ... */
	/* Naked send right from the port ref, then drop the port ref. */
	sendPort = ipc_port_make_send( port );
	iokit_release_port( port );
	/* ... lines missing in this copy ... */
	/* Consume the caller's object reference. */
	iokit_remove_reference( obj );
/*
 * Allocate a kernel IPC port representing `obj`, bind the object as the
 * port's kobject with the given type, and arm a no-senders notification
 * so the kernel learns when user space drops its last send right.
 *
 * NOTE(review): body is incomplete in this copy — the return-type line,
 * local declarations (`port`, `notify`), the allocation-failure check,
 * the port lock around nsrequest and the final return of the port are
 * missing.
 */
iokit_alloc_object_port( io_object_t obj, ipc_kobject_type_t type )
	/* ... lines missing in this copy ... */
	/* Allocate port, keeping a reference for it. */
	port = ipc_port_alloc_kernel();
	/* ... lines missing in this copy ... */
	/* set kobject & type */
	// iokit_add_reference( obj );
	ipc_kobject_set( port, (ipc_kobject_t) obj, type );
	/* ... lines missing in this copy ... */
	/* Request no-senders notifications on the port. */
	notify = ipc_port_make_sonce_locked( port );
	/* NOTE(review): "¬ify" below is mojibake — the HTML entity &not;
	 * was decoded inside "&notify"; the real argument should be the
	 * address of `notify` (previous-notification out-parameter). */
	ipc_port_nsrequest( port, 1, notify, ¬ify );
	/* Fresh port: no prior notification request should come back. */
	assert( notify == IP_NULL );
335 iokit_destroy_object_port( ipc_port_t port
)
338 iokit_lock_port(port
);
339 ipc_kobject_set( port
, IKO_NULL
, IKOT_NONE
);
341 // iokit_remove_reference( obj );
342 iokit_unlock_port(port
);
343 ipc_port_dealloc_kernel( port
);
346 return( KERN_SUCCESS
);
350 iokit_switch_object_port( ipc_port_t port
, io_object_t obj
, ipc_kobject_type_t type
)
352 iokit_lock_port(port
);
353 ipc_kobject_set( port
, (ipc_kobject_t
) obj
, type
);
354 iokit_unlock_port(port
);
356 return( KERN_SUCCESS
);
/*
 * Copy a send right for `obj` (of kobject type `type`) out into `task`'s
 * IPC space and return the resulting port name.  Consumes a reference on
 * `obj`.
 *
 * NOTE(review): body is incomplete in this copy — the opening brace,
 * declarations of `port`/`sendPort`/`kr`, the guard condition that
 * precedes the early `return MACH_PORT_NULL;` (presumably a NULL-object
 * check — confirm against canonical source), several closing braces and
 * the final return of `name` are missing.
 */
EXTERN mach_port_name_t
iokit_make_send_right( task_t task, io_object_t obj, ipc_kobject_type_t type )
	mach_port_name_t name = 0;

	/* ... guard condition missing in this copy ... */
		return MACH_PORT_NULL;

	port = iokit_port_for_object( obj, type );
	/* Naked send right from the port ref, then drop the port ref. */
	sendPort = ipc_port_make_send( port );
	iokit_release_port( port );

	if (IP_VALID( sendPort )) {
		/* Move the send right into the target task's space. */
		kr = ipc_object_copyout( task->itk_space, (ipc_object_t) sendPort,
			MACH_MSG_TYPE_PORT_SEND, TRUE, &name );
		if ( kr != KERN_SUCCESS ) {
			/* Copyout failed: give the right back and report no name. */
			ipc_port_release_send( sendPort );
			name = MACH_PORT_NULL;
	} else if ( sendPort == IP_NULL )
		name = MACH_PORT_NULL;
	else if ( sendPort == IP_DEAD )
		name = MACH_PORT_DEAD;

	/* Consume the caller's object reference. */
	iokit_remove_reference( obj );
395 iokit_mod_send_right( task_t task
, mach_port_name_t name
, mach_port_delta_t delta
)
397 return (mach_port_mod_refs( task
->itk_space
, name
, MACH_PORT_RIGHT_SEND
, delta
));
/*
 * Handle the No-More_Senders notification generated from a device port destroy.
 * Since there are no longer any tasks which hold a send right to this device
 * port a NMS notification has been generated.
 *
 * NOTE(review): body is incomplete in this copy — the return-type line,
 * declarations of `port`/`notify`, several closing braces, the port
 * lock/unlock around the re-request, and any trailing cleanup are
 * missing.
 */
iokit_no_senders( mach_no_senders_notification_t * notification )
	io_object_t obj = NULL;
	ipc_kobject_type_t type = IKOT_NONE;

	port = (ipc_port_t) notification->not_header.msgh_remote_port;

	// convert a port to io_object_t.
	if( IP_VALID(port)) {
		iokit_lock_port(port);
		if( ip_active(port)) {
			obj = (io_object_t) port->ip_kobject;
			type = ip_kotype( port );
			/* Only object/connect ports carry an IOKit object we can
			 * take a reference on. */
			if( (IKOT_IOKIT_OBJECT == type)
			 || (IKOT_IOKIT_CONNECT == type))
				iokit_add_reference( obj );
		/* ... lines missing in this copy ... */
		iokit_unlock_port(port);

		mach_port_mscount_t mscount = notification->not_count;

		/* Tell IOKit the client died; on failure, re-arm notification. */
		if( KERN_SUCCESS != iokit_client_died( obj, port, type, &mscount ))
		/* ... lines missing in this copy ... */
			/* Re-request no-senders notifications on the port (if still active) */
			if (ip_active(port)) {
				notify = ipc_port_make_sonce_locked(port);
				/* NOTE(review): "¬ify" below is mojibake — the HTML
				 * entity &not; was decoded inside "&notify"; the real
				 * argument should be the address of `notify`. */
				ipc_port_nsrequest( port, mscount + 1, notify, ¬ify );
				/* Drop any previously-registered send-once right. */
				if ( notify != IP_NULL )
					ipc_port_release_sonce(notify);
		/* ... lines missing in this copy ... */
		/* Drop the reference taken above. */
		iokit_remove_reference( obj );
/*
 * Dispatch a Mach notification message delivered to an IOKit port.
 * Only MACH_NOTIFY_NO_SENDERS is handled; other notification ids are
 * logged as unexpected.
 *
 * NOTE(review): body is incomplete in this copy — the return-type line,
 * braces, the `break`/`return` statements terminating each case, and the
 * function's return value are missing; as written the cases would fall
 * through.  Verify against canonical source.
 */
iokit_notify( mach_msg_header_t * msg )
	switch (msg->msgh_id) {
	case MACH_NOTIFY_NO_SENDERS:
		iokit_no_senders((mach_no_senders_notification_t *) msg);
		/* ... break/return missing in this copy ... */

	case MACH_NOTIFY_PORT_DELETED:
	case MACH_NOTIFY_PORT_DESTROYED:
	case MACH_NOTIFY_SEND_ONCE:
	case MACH_NOTIFY_DEAD_NAME:
		/* Unexpected for IOKit ports; log and ignore. */
		printf("iokit_notify: strange notification %d\n", msg->msgh_id);
473 /* need to create a pmap function to generalize */
474 unsigned int IODefaultCacheBits(addr64_t pa
)
476 return(pmap_cache_attributes((ppnum_t
)(pa
>> PAGE_SHIFT
)));
/*
 * Enter a block mapping of physical range [pa, pa+length) at virtual
 * address `va` in `map`, with protection and cache attributes derived
 * from `options` (kIOMapReadOnly, kIOMapCacheMask).
 *
 * NOTE(review): body is incomplete in this copy — the opening brace,
 * declarations of `prot`/`pagenum`/`flags`, a pmap-validity guard, the
 * `break` statements terminating each switch case (and the
 * kIOMapInhibitCache flags assignment), and closing braces are missing.
 * As written the cases would fall through; verify against canonical
 * source.
 */
kern_return_t
IOMapPages(vm_map_t map, mach_vm_address_t va, mach_vm_address_t pa,
	mach_vm_size_t length, unsigned int options)
	pmap_t pmap = map->pmap;

	/* Read-only mappings get VM_PROT_READ; otherwise read/write. */
	prot = (options & kIOMapReadOnly)
		? VM_PROT_READ : (VM_PROT_READ|VM_PROT_WRITE);

	pagenum = (ppnum_t)atop_64(pa);

	switch(options & kIOMapCacheMask ) { /* What cache mode do we need? */
	case kIOMapDefaultCache:
		flags = IODefaultCacheBits(pa);
		/* ... break missing in this copy ... */
	case kIOMapInhibitCache:
		/* ... flags assignment/break missing in this copy ... */
	case kIOMapWriteThruCache:
		flags = VM_WIMG_WTHRU;
		/* ... break missing in this copy ... */
	case kIOMapWriteCombineCache:
		flags = VM_WIMG_WCOMB;
		/* ... break missing in this copy ... */
	case kIOMapCopybackCache:
		flags = VM_WIMG_COPYBACK;
		/* ... break missing in this copy ... */
	case kIOMapCopybackInnerCache:
		flags = VM_WIMG_INNERWBACK;
		/* ... break missing in this copy ... */

	/* Record the chosen cache attributes at both pmap and VM layers. */
	pmap_set_cache_attributes(pagenum, flags);
	vm_map_set_cache_attr(map, (vm_map_offset_t)va);

	// Set up a block mapped area
	pmap_map_block(pmap, va, pagenum, (uint32_t) atop_64(round_page_64(length)), prot, 0, 0);

	return( KERN_SUCCESS );
530 kern_return_t
IOUnmapPages(vm_map_t map
, mach_vm_address_t va
, mach_vm_size_t length
)
532 pmap_t pmap
= map
->pmap
;
534 pmap_remove(pmap
, trunc_page_64(va
), round_page_64(va
+ length
));
536 return( KERN_SUCCESS
);
/*
 * Re-enter the existing mappings over [va, va+length) in `map` with new
 * protection and cache attributes derived from `options`, batching TLB
 * flushes through a pmap_flush_context.
 *
 * NOTE(review): body is incomplete in this copy — the opening brace,
 * declarations of `prot`/`flags`/`off`, the switch's braces and `break`
 * statements (including the kIOMapInhibitCache flags assignment and any
 * default arm), the loop braces, and a presumed ppnum-validity guard
 * around pmap_enter_options are missing.  The `__unused` qualifiers on
 * parameters that ARE used below also look suspect — verify against
 * canonical source.
 */
kern_return_t
IOProtectCacheMode(vm_map_t __unused map, mach_vm_address_t __unused va,
	mach_vm_size_t __unused length, unsigned int __unused options)
	pmap_t pmap = map->pmap;
	pmap_flush_context pmap_flush_context_storage;
	boolean_t delayed_pmap_flush = FALSE;

	/* Read-only mappings get VM_PROT_READ; otherwise read/write. */
	prot = (options & kIOMapReadOnly)
		? VM_PROT_READ : (VM_PROT_READ|VM_PROT_WRITE);

	switch (options & kIOMapCacheMask)
	// what cache mode do we need?
	case kIOMapDefaultCache:
		/* Default cache mode cannot be applied retroactively. */
		return (KERN_INVALID_ARGUMENT);

	case kIOMapInhibitCache:
		/* ... flags assignment/break missing in this copy ... */
	case kIOMapWriteThruCache:
		flags = VM_WIMG_WTHRU;
		/* ... break missing in this copy ... */
	case kIOMapWriteCombineCache:
		flags = VM_WIMG_WCOMB;
		/* ... break missing in this copy ... */
	case kIOMapCopybackCache:
		flags = VM_WIMG_COPYBACK;
		/* ... break missing in this copy ... */

	pmap_flush_context_init(&pmap_flush_context_storage);
	delayed_pmap_flush = FALSE;

	// enter each page's physical address in the target map
	for (off = 0; off < length; off += page_size)
		ppnum_t ppnum = pmap_find_phys(pmap, va + off);
		/* ... validity guard presumably missing in this copy ... */
		/* Defer the TLB flush; it is issued once after the loop. */
		pmap_enter_options(pmap, va + off, ppnum, prot, VM_PROT_NONE, flags, TRUE,
			PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
		delayed_pmap_flush = TRUE;

	/* Flush once for all the deferred entries. */
	if (delayed_pmap_flush == TRUE)
		pmap_flush(&pmap_flush_context_storage);

	return (KERN_SUCCESS);
/*
 * Return the highest physical page number present in any pmap memory
 * region (x86 only).
 *
 * NOTE(review): body is incomplete in this copy — the opening brace, the
 * declaration of `idx`, the update of `highest`, the return statement
 * and the non-x86 #else/#endif arm are all missing.
 */
ppnum_t
IOGetLastPageNumber(void)
#if __i386__ || __x86_64__
	ppnum_t lastPage, highest = 0;

	/* Scan every pmap memory region for its last page. */
	for (idx = 0; idx < pmap_memory_region_count; idx++)
		lastPage = pmap_memory_regions[idx].end - 1;
		if (lastPage > highest)
		/* ... assignment/return/#else/#endif missing in this copy ... */
/* Forward declaration for the definition that follows. */
void IOGetTime( mach_timespec_t * clock_time);
/*
 * Fill `clock_time` with the current system time as reported by
 * clock_get_system_nanotime().
 *
 * NOTE(review): body is incomplete in this copy — the opening/closing
 * braces and the declarations of `sec` and `nsec` (presumably
 * clock_sec_t / clock_nsec_t to match clock_get_system_nanotime —
 * confirm) are missing.
 */
void IOGetTime( mach_timespec_t * clock_time)
	clock_get_system_nanotime(&sec, &nsec);
	/* Narrow seconds to the tv_sec field's type; nanoseconds copied as-is. */
	clock_time->tv_sec = (typeof(clock_time->tv_sec))sec;
	clock_time->tv_nsec = nsec;