+
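+ /* Walk the chain of shared region mappings, mapping each entry into the target map. */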
+ while (next) {
+ /*
+  * This should be fleshed out for the general case, but that
+  * is not necessary for now. Indeed, we are handling the comm
+  * page inside of the shared_region mapping create calls for
+  * now, for simplicity's sake. If more general support is
+  * needed, the code to manipulate the shared range chain can
+  * be pulled out and moved to the callers.
+  */
+ shared_region_mapping_info(next,
+ &(map_info.text_region),
+ &(map_info.text_size),
+ &(map_info.data_region),
+ &(map_info.data_size),
+ &(map_info.region_mappings),
+ &(map_info.client_base),
+ &(map_info.alternate_base),
+ &(map_info.alternate_next),
+ &(map_info.fs_base),
+ &(map_info.system),
+ &(map_info.flags), &next);
+
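+ /* Map the shared region's text segment read-only, fixed at its client base address. */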
+ vmaddr = map_info.client_base;
+ vm_map(map, &vmaddr, map_info.text_size,
+ 0, SHARED_LIB_ALIAS | VM_FLAGS_FIXED,
+ map_info.text_region, 0, FALSE,
+ VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
+ }
+ }
+ }
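+ /* If a dynamic linker load command was found, load the dynamic linker as well. */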
+ if (dlp != 0)
+ ret = load_dylinker(dlp, dlarchbits, map, thr_act, depth, result, clean_regions, abi64);
+
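+ /*
+  * Only the outermost load (depth 1) finishes up: the image must
+  * have provided at least one thread, and the commpage is mapped
+  * into the new address space.
+  */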
+ if(depth == 1) {
+ if (result->thread_count == 0)
+ ret = LOAD_FAILURE;
+ else if ( abi64 ) {
+ /* Map in 64-bit commpage */
+ /* LP64todo - make this clean */
+ pmap_map_sharedpage(current_task(), get_map_pmap(map));
+ vm_map_commpage64(map);
+ } else {
+#ifdef __i386__
+ /*
+ * On Intel, the comm page doesn't get mapped
+ * automatically because it goes beyond the current end
+ * of the VM map in the current 3GB/1GB address space
+ * model.
+ * XXX This will probably become unnecessary when we
+ * switch to the 4GB/4GB address space model.
+ */
+ vm_map_commpage32(map);
+#endif /* __i386__ */