1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22/*
23 * @OSF_COPYRIGHT@
24 */
25/*
26 * Mach Operating System
27 * Copyright (c) 1990,1991,1992 The University of Utah and
28 * the Center for Software Science (CSS).
29 * Copyright (c) 1991,1987 Carnegie Mellon University.
30 * All rights reserved.
31 *
32 * Permission to use, copy, modify and distribute this software and its
33 * documentation is hereby granted, provided that both the copyright
34 * notice and this permission notice appear in all copies of the
35 * software, derivative works or modified versions, and any portions
36 * thereof, and that both notices appear in supporting documentation,
37 * and that all advertising materials mentioning features or use of
38 * this software display the following acknowledgement: ``This product
39 * includes software developed by the Center for Software Science at
40 * the University of Utah.''
41 *
42 * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF
43 * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
44 * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
45 * THIS SOFTWARE.
46 *
47 * CSS requests users of this software to return to css-dist@cs.utah.edu any
48 * improvements that they make and grant CSS redistribution rights.
49 *
50 * Carnegie Mellon requests users of this software to return to
51 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
52 * School of Computer Science
53 * Carnegie Mellon University
54 * Pittsburgh PA 15213-3890
55 * any improvements or extensions that they make and grant Carnegie Mellon
56 * the rights to redistribute these changes.
57 *
58 * Utah $Hdr: pmap.c 1.28 92/06/23$
59 * Author: Mike Hibler, Bob Wheeler, University of Utah CSS, 10/90
60 */
61
62/*
63 * Manages physical address maps for powerpc.
64 *
65 * In addition to hardware address maps, this
66 * module is called upon to provide software-use-only
67 * maps which may or may not be stored in the same
68 * form as hardware maps. These pseudo-maps are
69 * used to store intermediate results from copy
70 * operations to and from address spaces.
71 *
72 * Since the information managed by this module is
73 * also stored by the logical address mapping module,
74 * this module may throw away valid virtual-to-physical
75 * mappings at almost any time. However, invalidations
76 * of virtual-to-physical mappings must be done as
77 * requested.
78 *
79 * In order to cope with hardware architectures which
80 * make virtual-to-physical map invalidates expensive,
 81 * this module may delay invalidate or reduce-protection
 82 * operations until such time as they are actually
 83 * necessary. This module is given full information as to
 84 * when physical maps must be made correct.
85 *
86 */
87
88#include <zone_debug.h>
89#include <cpus.h>
90#include <debug.h>
91#include <mach_kgdb.h>
92#include <mach_vm_debug.h>
93#include <db_machine_commands.h>
94
95#include <kern/thread.h>
 96#include <kern/simple_lock.h>
97#include <mach/vm_attributes.h>
98#include <mach/vm_param.h>
99#include <kern/spl.h>
100
101#include <kern/misc_protos.h>
102#include <ppc/misc_protos.h>
103#include <ppc/proc_reg.h>
104
105#include <vm/pmap.h>
106#include <vm/vm_map.h>
107#include <vm/vm_page.h>
108
109#include <ppc/pmap.h>
110#include <ppc/pmap_internals.h>
111#include <ppc/mem.h>
112#include <ppc/mappings.h>
113
114#include <ppc/new_screen.h>
115#include <ppc/Firmware.h>
116#include <ppc/savearea.h>
 117#include <ppc/exception.h>
118#include <ddb/db_output.h>
119
120#if DB_MACHINE_COMMANDS
121/* optionally enable traces of pmap operations in post-mortem trace table */
122/* #define PMAP_LOWTRACE 1 */
123#define PMAP_LOWTRACE 0
124#else /* DB_MACHINE_COMMANDS */
125/* Can not trace even if we wanted to */
126#define PMAP_LOWTRACE 0
127#endif /* DB_MACHINE_COMMANDS */
128
129#define PERFTIMES 0
130
131#if PERFTIMES && DEBUG
132#define debugLog2(a, b, c) dbgLog2(a, b, c)
133#else
134#define debugLog2(a, b, c)
135#endif
136
137extern unsigned int avail_remaining;
138extern unsigned int mappingdeb0;
 139extern struct Saveanchor saveanchor; /* Aligned savearea anchor */
140extern int real_ncpus; /* Number of actual CPUs */
141unsigned int debugbackpocket; /* (TEST/DEBUG) */
142
143vm_offset_t avail_next;
144vm_offset_t first_free_virt;
145int current_free_region; /* Used in pmap_next_page */
146
147/* forward */
148void pmap_activate(pmap_t pmap, thread_t th, int which_cpu);
149void pmap_deactivate(pmap_t pmap, thread_t th, int which_cpu);
150void copy_to_phys(vm_offset_t sva, vm_offset_t dpa, int bytecount);
151
152#if MACH_VM_DEBUG
153int pmap_list_resident_pages(pmap_t pmap, vm_offset_t *listp, int space);
154#endif
155
156#if DEBUG
157#define PDB_USER 0x01 /* exported functions */
158#define PDB_MAPPING 0x02 /* low-level mapping routines */
159#define PDB_ENTER 0x04 /* pmap_enter specifics */
160#define PDB_COPY 0x08 /* copy page debugging */
161#define PDB_ZERO 0x10 /* zero page debugging */
162#define PDB_WIRED 0x20 /* things concerning wired entries */
163#define PDB_PTEG 0x40 /* PTEG overflows */
164#define PDB_LOCK 0x100 /* locks */
165#define PDB_IO 0x200 /* Improper use of WIMG_IO checks - PCI machines */
166
167int pmdebug=0;
168#endif
169
170/* NOTE: kernel_pmap_store must be in V=R storage and aligned!!!!!!!!!!!!!! */
171
172extern struct pmap kernel_pmap_store;
173pmap_t kernel_pmap; /* Pointer to kernel pmap and anchor for in-use pmaps */
174pmap_t cursor_pmap; /* Pointer to last pmap allocated or previous if removed from in-use list */
175struct zone *pmap_zone; /* zone of pmap structures */
176boolean_t pmap_initialized = FALSE;
177
178/*
179 * Physical-to-virtual translations are handled by inverted page table
180 * structures, phys_tables. Multiple mappings of a single page are handled
181 * by linking the affected mapping structures. We initialise one region
182 * for phys_tables of the physical memory we know about, but more may be
183 * added as it is discovered (eg. by drivers).
184 */
185struct phys_entry *phys_table; /* For debugging */
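/*
 * For example, every mapping of a physical page can be visited by walking the
 * phys_link chain of its physent (pmap_attribute() below does essentially
 * this), roughly:
 *
 *	struct phys_entry *pp = pmap_find_physentry(pa);
 *	struct mapping *mpv;
 *	if (pp != PHYS_NULL)
 *		for (mpv = hw_cpv((mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS));
 *		     mpv != NULL; mpv = hw_cpv(mpv->next))
 *			... examine one mapping of the page ...
 *
 * This is only a sketch; the real walkers also take the physical entry lock
 * (PHYS_LOCK) before following the chain.
 */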
186
187lock_t pmap_system_lock;
188
189decl_simple_lock_data(,tlb_system_lock)
190
191/*
192 * free pmap list. caches the first free_pmap_max pmaps that are freed up
193 */
194int free_pmap_max = 32;
195int free_pmap_count;
196pmap_t free_pmap_list;
197decl_simple_lock_data(,free_pmap_lock)
198
199/*
 200 * Function to find the phys_table entry (physent) for a given physical address
201 */
202
203struct phys_entry *pmap_find_physentry(vm_offset_t pa)
204{
205 int i;
206 struct phys_entry *entry;
207
208 for (i = pmap_mem_regions_count-1; i >= 0; i--) {
209 if (pa < pmap_mem_regions[i].start)
210 continue;
211 if (pa >= pmap_mem_regions[i].end)
212 return PHYS_NULL;
213
214 entry = &pmap_mem_regions[i].phys_table[(pa - pmap_mem_regions[i].start) >> PPC_PGSHIFT];
215 __asm__ volatile("dcbt 0,%0" : : "r" (entry)); /* We will use this in a little bit */
216 return entry;
217 }
218 kprintf("DEBUG : pmap_find_physentry 0x%08x out of range\n",pa);
219 return PHYS_NULL;
220}
221
222/*
223 * kern_return_t
224 * pmap_add_physical_memory(vm_offset_t spa, vm_offset_t epa,
225 * boolean_t available, unsigned int attr)
226 * Allocate some extra physentries for the physical addresses given,
227 * specifying some default attribute that on the powerpc specifies
 228 * the default cacheability for any mappings using these addresses.
229 * If the memory is marked as available, it is added to the general
230 * VM pool, otherwise it is not (it is reserved for card IO etc).
231 */
232kern_return_t pmap_add_physical_memory(vm_offset_t spa, vm_offset_t epa,
233 boolean_t available, unsigned int attr)
234{
235 int i,j;
236 spl_t s;
237
238 /* Only map whole pages */
239
240 panic("Forget it! You can't map no more memory, you greedy puke!\n");
241
242 spa = trunc_page(spa);
243 epa = round_page(epa);
244
245 /* First check that the region doesn't already exist */
246
247 assert (epa >= spa);
248 for (i = 0; i < pmap_mem_regions_count; i++) {
249 /* If we're below the next region, then no conflict */
250 if (epa < pmap_mem_regions[i].start)
251 break;
252 if (spa < pmap_mem_regions[i].end) {
253#if DEBUG
254 kprintf("pmap_add_physical_memory(0x%08x,0x%08x,0x%08x) - memory already present\n",spa,epa,attr);
255#endif /* DEBUG */
256 return KERN_NO_SPACE;
257 }
258 }
259
260#if DEBUG
261 kprintf("pmap_add_physical_memory; region insert spot: %d out of %d\n", i, pmap_mem_regions_count); /* (TEST/DEBUG) */
262#endif
263
264 /* Check that we've got enough space for another region */
265 if (pmap_mem_regions_count == PMAP_MEM_REGION_MAX)
266 return KERN_RESOURCE_SHORTAGE;
267
268 /* Once here, i points to the mem_region above ours in physical mem */
269
270 /* allocate a new phys_table for this new region */
271#if DEBUG
272 kprintf("pmap_add_physical_memory; kalloc\n"); /* (TEST/DEBUG) */
273#endif
274
275 phys_table = (struct phys_entry *)
276 kalloc(sizeof(struct phys_entry) * atop(epa-spa));
277#if DEBUG
278 kprintf("pmap_add_physical_memory; new phys_table: %08X\n", phys_table); /* (TEST/DEBUG) */
279#endif
280
281 /* Initialise the new phys_table entries */
282 for (j = 0; j < atop(epa-spa); j++) {
283
284 phys_table[j].phys_link = MAPPING_NULL;
285
286 mapping_phys_init(&phys_table[j], spa+(j*PAGE_SIZE), attr); /* Initialize the hardware specific portions */
287
288 }
289 s = splhigh();
290
291 /* Move all the phys_table entries up some to make room in
292 * the ordered list.
293 */
294 for (j = pmap_mem_regions_count; j > i ; j--)
295 pmap_mem_regions[j] = pmap_mem_regions[j-1];
296
297 /* Insert a new entry with some memory to back it */
298
299 pmap_mem_regions[i].start = spa;
300 pmap_mem_regions[i].end = epa;
301 pmap_mem_regions[i].phys_table = phys_table;
302
303 pmap_mem_regions_count++;
304 splx(s);
305
306#if DEBUG
307 for(i=0; i<pmap_mem_regions_count; i++) { /* (TEST/DEBUG) */
308 kprintf("region %d: %08X %08X %08X\n", i, pmap_mem_regions[i].start,
309 pmap_mem_regions[i].end, pmap_mem_regions[i].phys_table); /* (TEST/DEBUG) */
310 }
311#endif
312
313 if (available) {
314 kprintf("warning : pmap_add_physical_mem() "
315 "available not yet supported\n");
316 }
317
318 return KERN_SUCCESS;
319}
320
321/*
322 * pmap_map(va, spa, epa, prot)
323 * is called during boot to map memory in the kernel's address map.
324 * A virtual address range starting at "va" is mapped to the physical
325 * address range "spa" to "epa" with machine independent protection
326 * "prot".
327 *
328 * "va", "spa", and "epa" are byte addresses and must be on machine
329 * independent page boundaries.
330 *
 331 * The pages cover a contiguous virtual address range and share the same protection
 332 * and attributes, so we map them with a single block mapping.
333 *
334 */
335vm_offset_t
336pmap_map(
337 vm_offset_t va,
338 vm_offset_t spa,
339 vm_offset_t epa,
340 vm_prot_t prot)
341{
342
343
344 if (spa == epa)
345 return(va);
346
347 assert(epa > spa);
348 debugLog2(40, va, spa); /* Log pmap_map call */
349
350 pmap_map_block(kernel_pmap, va, spa, epa - spa, prot, PTE_WIMG_DEFAULT, blkPerm); /* Set up a permanent block mapped area */
351
352 debugLog2(41, epa, prot); /* Log pmap_map call */
353
354 return(va);
355}
356
357/*
358 * pmap_map_bd(va, spa, epa, prot)
359 * Back-door routine for mapping kernel VM at initialisation.
360 * Used for mapping memory outside the known physical memory
361 * space, with caching disabled. Designed for use by device probes.
362 *
363 * A virtual address range starting at "va" is mapped to the physical
364 * address range "spa" to "epa" with machine independent protection
365 * "prot".
366 *
367 * "va", "spa", and "epa" are byte addresses and must be on machine
368 * independent page boundaries.
369 *
370 * WARNING: The current version of memcpy() can use the dcbz instruction
371 * on the destination addresses. This will cause an alignment exception
372 * and consequent overhead if the destination is caching-disabled. So
373 * avoid memcpy()ing into the memory mapped by this function.
374 *
 375 * Also, many other pmap_ routines will misbehave if you try to change
 376 * protections or remove these mappings; they are designed to be permanent.
377 *
378 * These areas will be added to the autogen list, if possible. Existing translations
 379 * are overridden and their mapping structures are released. This takes place in
380 * the autogen_map function.
381 *
382 * Locking:
383 * this routine is called only during system initialization when only
384 * one processor is active, so no need to take locks...
385 */
386vm_offset_t
387pmap_map_bd(
388 vm_offset_t va,
389 vm_offset_t spa,
390 vm_offset_t epa,
391 vm_prot_t prot)
392{
393 register struct mapping *mp;
394 register struct phys_entry *pp;
395
396
397 if (spa == epa)
398 return(va);
399
400 assert(epa > spa);
401
402 debugLog2(42, va, epa); /* Log pmap_map_bd call */
403
404 pmap_map_block(kernel_pmap, va, spa, epa - spa, prot, PTE_WIMG_IO, blkPerm); /* Set up autogen area */
405
406 debugLog2(43, epa, prot); /* Log pmap_map_bd exit */
407
408 return(va);
409}
410
411/*
412 * Bootstrap the system enough to run with virtual memory.
413 * Map the kernel's code and data, and allocate the system page table.
414 * Called with mapping done by BATs. Page_size must already be set.
415 *
416 * Parameters:
417 * mem_size: Total memory present
418 * first_avail: First virtual address available
419 * first_phys_avail: First physical address available
420 */
421void
422pmap_bootstrap(unsigned int mem_size, vm_offset_t *first_avail, vm_offset_t *first_phys_avail, unsigned int kmapsize)
423{
424 register struct mapping *mp;
425 vm_offset_t addr;
426 vm_size_t size;
427 int i, num, j, rsize, mapsize, vmpagesz, vmmapsz;
428 unsigned int mask;
429 vm_offset_t first_used_addr;
430 PCA *pcaptr;
431
432 *first_avail = round_page(*first_avail);
433
434#if DEBUG
435 kprintf("first_avail=%08X; first_phys_avail=%08X; avail_remaining=%d\n",
436 *first_avail, *first_phys_avail, avail_remaining);
437#endif
438
439 assert(PAGE_SIZE == PPC_PGBYTES);
440
441 /*
442 * Initialize kernel pmap
443 */
444 kernel_pmap = &kernel_pmap_store;
445 cursor_pmap = &kernel_pmap_store;
446
447 lock_init(&pmap_system_lock,
448 FALSE, /* NOT a sleep lock */
449 ETAP_VM_PMAP_SYS,
450 ETAP_VM_PMAP_SYS_I);
451
452 simple_lock_init(&kernel_pmap->lock, ETAP_VM_PMAP_KERNEL);
453
454 kernel_pmap->pmap_link.next = (queue_t)kernel_pmap; /* Set up anchor forward */
455 kernel_pmap->pmap_link.prev = (queue_t)kernel_pmap; /* Set up anchor reverse */
456 kernel_pmap->ref_count = 1;
457 kernel_pmap->space = PPC_SID_KERNEL;
458 kernel_pmap->pmapvr = 0; /* Virtual = Real */
459 kernel_pmap->bmaps = 0; /* No block pages just yet */
460 for(i=0; i < 128; i++) { /* Clear usage slots */
461 kernel_pmap->pmapUsage[i] = 0;
462 }
463 for(i=0; i < 16; i++) { /* Initialize for laughs */
464 kernel_pmap->pmapSegs[i] = SEG_REG_PROT | (i << 20) | PPC_SID_KERNEL;
465 }
466
467 /*
468 * Allocate: (from first_avail up)
469 * Aligned to its own size:
470 * hash table (for mem size 2**x, allocate 2**(x-10) entries)
 471 * mapping table (same size and immediately following hash table)
472 */
473 /* hash_table_size must be a power of 2, recommended sizes are
474 * taken from PPC601 User Manual, table 6-19. We take the next
475 * highest size if mem_size is not a power of two.
476 * TODO NMGS make this configurable at boot time.
477 */
478
479 num = sizeof(pte_t) * (mem_size >> 10);
480
481 for (hash_table_size = 64 * 1024; /* minimum size = 64Kbytes */
482 hash_table_size < num;
483 hash_table_size *= 2)
484 continue;
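	/* For example, with 96MB of memory: num = 8 * (96MB >> 10) = 768KB worth
	 * of PTEs (assuming sizeof(pte_t) == 8), so hash_table_size doubles from
	 * 64KB up to 1MB, the next power of two that covers it.
	 */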
485
486 /* Scale to within any physical memory layout constraints */
487 do {
488 num = atop(mem_size); /* num now holds mem_size in pages */
489
490 /* size of all structures that we're going to allocate */
491
492 size = (vm_size_t) (
493 (InitialSaveBloks * PAGE_SIZE) + /* Allow space for the initial context saveareas */
 494 ((InitialSaveBloks / 2) * PAGE_SIZE) + /* For backpocket saveareas */
495 hash_table_size + /* For hash table */
496 hash_table_size + /* For PTEG allocation table */
497 (num * sizeof(struct phys_entry)) /* For the physical entries */
498 );
499
500 mapsize = size = round_page(size); /* Get size of area to map that we just calculated */
501 mapsize = mapsize + kmapsize; /* Account for the kernel text size */
502
503 vmpagesz = round_page(num * sizeof(struct vm_page)); /* Allow for all vm_pages needed to map physical mem */
504 vmmapsz = round_page((num / 8) * sizeof(struct vm_map_entry)); /* Allow for vm_maps */
505
506 mapsize = mapsize + vmpagesz + vmmapsz; /* Add the VM system estimates into the grand total */
507
508 mapsize = mapsize + (4 * 1024 * 1024); /* Allow for 4 meg of extra mappings */
509 mapsize = ((mapsize / PAGE_SIZE) + MAPPERBLOK - 1) / MAPPERBLOK; /* Get number of blocks of mappings we need */
510 mapsize = mapsize + ((mapsize + MAPPERBLOK - 1) / MAPPERBLOK); /* Account for the mappings themselves */
511
512#if DEBUG
513 kprintf("pmap_bootstrap: initial vm_pages = %08X\n", vmpagesz);
514 kprintf("pmap_bootstrap: initial vm_maps = %08X\n", vmmapsz);
515 kprintf("pmap_bootstrap: size before mappings = %08X\n", size);
516 kprintf("pmap_bootstrap: kernel map size = %08X\n", kmapsize);
517 kprintf("pmap_bootstrap: mapping blocks rqrd = %08X\n", mapsize);
518#endif
519
520 size = size + (mapsize * PAGE_SIZE); /* Get the true size we need */
521
522 /* hash table must be aligned to its size */
523
524 addr = (*first_avail +
525 (hash_table_size-1)) & ~(hash_table_size-1);
526
527 if (addr + size > pmap_mem_regions[0].end) {
528 hash_table_size /= 2;
529 } else {
530 break;
531 }
532 /* If we have had to shrink hash table to too small, panic */
533 if (hash_table_size == 32 * 1024)
534 panic("cannot lay out pmap memory map correctly");
535 } while (1);
536
537#if DEBUG
538 kprintf("hash table size=%08X, total size of area=%08X, addr=%08X\n",
539 hash_table_size, size, addr);
540#endif
541 if (round_page(*first_phys_avail) < trunc_page(addr)) {
542 /* We are stepping over at least one page here, so
543 * add this region to the free regions so that it can
544 * be allocated by pmap_steal
545 */
546 free_regions[free_regions_count].start = round_page(*first_phys_avail);
547 free_regions[free_regions_count].end = trunc_page(addr);
548
549 avail_remaining += (free_regions[free_regions_count].end -
550 free_regions[free_regions_count].start) /
551 PPC_PGBYTES;
552#if DEBUG
553 kprintf("ADDED FREE REGION from 0x%08x to 0x%08x, avail_remaining = %d\n",
554 free_regions[free_regions_count].start,free_regions[free_regions_count].end,
555 avail_remaining);
556#endif /* DEBUG */
557 free_regions_count++;
558 }
559
560 /* Zero everything - this also invalidates the hash table entries */
561 bzero((char *)addr, size);
562
563 /* Set up some pointers to our new structures */
564
565 /* from here, addr points to the next free address */
566
567 first_used_addr = addr; /* remember where we started */
568
569 /* Set up hash table address and dma buffer address, keeping
570 * alignment. These mappings are all 1-1, so dma_r == dma_v
571 *
572 * If hash_table_size == dma_buffer_alignment, then put hash_table
573 * first, since dma_buffer_size may be smaller than alignment, but
574 * hash table alignment==hash_table_size.
575 */
576 hash_table_base = addr;
577
578 addr += hash_table_size;
579 addr += hash_table_size; /* Add another for the PTEG Control Area */
580 assert((hash_table_base & (hash_table_size-1)) == 0);
581
582 pcaptr = (PCA *)(hash_table_base+hash_table_size); /* Point to the PCA table */
 583 mapCtl.mapcflush.pcaptr = pcaptr;
584
585 for(i=0; i < (hash_table_size/64) ; i++) { /* For all of PTEG control areas: */
586 pcaptr[i].flgs.PCAalflgs.PCAfree=0xFF; /* Mark all slots free */
587 pcaptr[i].flgs.PCAalflgs.PCAsteal=0x01; /* Initialize steal position */
588 }
589
590 savearea_init(&addr); /* Initialize the savearea chains and data */
591
592 /* phys_table is static to help debugging,
593 * this variable is no longer actually used
594 * outside of this scope
595 */
596
597 phys_table = (struct phys_entry *) addr;
598
599#if DEBUG
600 kprintf("hash_table_base =%08X\n", hash_table_base);
601 kprintf("phys_table =%08X\n", phys_table);
602 kprintf("pmap_mem_regions_count =%08X\n", pmap_mem_regions_count);
603#endif
604
605 for (i = 0; i < pmap_mem_regions_count; i++) {
606
607 pmap_mem_regions[i].phys_table = phys_table;
608 rsize = (pmap_mem_regions[i].end - (unsigned int)pmap_mem_regions[i].start)/PAGE_SIZE;
609
610#if DEBUG
611 kprintf("Initializing physical table for region %d\n", i);
612 kprintf(" table=%08X, size=%08X, start=%08X, end=%08X\n",
613 phys_table, rsize, pmap_mem_regions[i].start,
614 (unsigned int)pmap_mem_regions[i].end);
615#endif
616
617 for (j = 0; j < rsize; j++) {
618 phys_table[j].phys_link = MAPPING_NULL;
619 mapping_phys_init(&phys_table[j], (unsigned int)pmap_mem_regions[i].start+(j*PAGE_SIZE),
620 PTE_WIMG_DEFAULT); /* Initializes hw specific storage attributes */
621 }
622 phys_table = phys_table +
623 atop(pmap_mem_regions[i].end - pmap_mem_regions[i].start);
624 }
625
626 /* restore phys_table for debug */
627 phys_table = (struct phys_entry *) addr;
628
629 addr += sizeof(struct phys_entry) * num;
630
631 simple_lock_init(&tlb_system_lock, ETAP_VM_PMAP_TLB);
632
633 /* Initialise the registers necessary for supporting the hashtable */
634#if DEBUG
635 kprintf("*** hash_table_init: base=%08X, size=%08X\n", hash_table_base, hash_table_size);
636#endif
637
638 hash_table_init(hash_table_base, hash_table_size);
639
640/*
641 * Remaining space is for mapping entries. Tell the initializer routine that
642 * the mapping system can't release this block because it's permanently assigned
643 */
644
645 mapping_init(); /* Initialize the mapping tables */
646
647 for(i = addr; i < first_used_addr + size; i += PAGE_SIZE) { /* Add initial mapping blocks */
648 mapping_free_init(i, 1, 0); /* Pass block address and say that this one is not releasable */
649 }
650 mapCtl.mapcmin = MAPPERBLOK; /* Make sure we only adjust one at a time */
651
652#if DEBUG
653
654 kprintf("mapping kernel memory from 0x%08x to 0x%08x, to address 0x%08x\n",
655 first_used_addr, round_page(first_used_addr+size),
656 first_used_addr);
657#endif /* DEBUG */
658
659 /* Map V=R the page tables */
660 pmap_map(first_used_addr, first_used_addr,
661 round_page(first_used_addr+size), VM_PROT_READ | VM_PROT_WRITE);
662
663#if DEBUG
664
665 for(i=first_used_addr; i < round_page(first_used_addr+size); i+=PAGE_SIZE) { /* Step through all these mappings */
666 if(i != (j = kvtophys(i))) { /* Verify that the mapping was made V=R */
667 kprintf("*** V=R mapping failed to verify: V=%08X; R=%08X\n", i, j);
668 }
669 }
670#endif
671
672 *first_avail = round_page(first_used_addr + size);
673 first_free_virt = round_page(first_used_addr + size);
674
675 /* All the rest of memory is free - add it to the free
676 * regions so that it can be allocated by pmap_steal
677 */
678 free_regions[free_regions_count].start = *first_avail;
679 free_regions[free_regions_count].end = pmap_mem_regions[0].end;
680
681 avail_remaining += (free_regions[free_regions_count].end -
682 free_regions[free_regions_count].start) /
683 PPC_PGBYTES;
684
685#if DEBUG
686 kprintf("ADDED FREE REGION from 0x%08x to 0x%08x, avail_remaining = %d\n",
687 free_regions[free_regions_count].start,free_regions[free_regions_count].end,
688 avail_remaining);
689#endif /* DEBUG */
690
691 free_regions_count++;
692
693 current_free_region = 0;
694
695 avail_next = free_regions[current_free_region].start;
696
697#if DEBUG
698 kprintf("Number of free regions=%d\n",free_regions_count); /* (TEST/DEBUG) */
699 kprintf("Current free region=%d\n",current_free_region); /* (TEST/DEBUG) */
700 for(i=0;i<free_regions_count; i++) { /* (TEST/DEBUG) */
701 kprintf("Free region %3d - from %08X to %08X\n", i, free_regions[i].start,
702 free_regions[i].end); /* (TEST/DEBUG) */
703 }
704 for (i = 0; i < pmap_mem_regions_count; i++) { /* (TEST/DEBUG) */
705 kprintf("PMAP region %3d - from %08X to %08X; phys=%08X\n", i, /* (TEST/DEBUG) */
706 pmap_mem_regions[i].start, /* (TEST/DEBUG) */
707 pmap_mem_regions[i].end, /* (TEST/DEBUG) */
708 pmap_mem_regions[i].phys_table); /* (TEST/DEBUG) */
709 }
710#endif
711
712}
713
714/*
 715 * pmap_init()
716 * finishes the initialization of the pmap module.
717 * This procedure is called from vm_mem_init() in vm/vm_init.c
718 * to initialize any remaining data structures that the pmap module
719 * needs to map virtual memory (VM is already ON).
720 *
721 * Note that the pmap needs to be sized and aligned to
722 * a power of two. This is because it is used both in virtual and
723 * real so it can't span a page boundary.
724 */
725
726void
727pmap_init(void)
728{
729
730
731 pmap_zone = zinit(pmapSize, 400 * pmapSize, 4096, "pmap");
732#if ZONE_DEBUG
733 zone_debug_disable(pmap_zone); /* Can't debug this one 'cause it messes with size and alignment */
734#endif /* ZONE_DEBUG */
735
736 pmap_initialized = TRUE;
737
738 /*
739 * Initialize list of freed up pmaps
740 */
741 free_pmap_list = 0; /* Set that there are no free pmaps */
742 free_pmap_count = 0;
743 simple_lock_init(&free_pmap_lock, ETAP_VM_PMAP_CACHE);
744}
745
746unsigned int pmap_free_pages(void)
747{
748 return avail_remaining;
749}
750
751boolean_t pmap_next_page(vm_offset_t *addrp)
752{
753 /* Non optimal, but only used for virtual memory startup.
754 * Allocate memory from a table of free physical addresses
755 * If there are no more free entries, too bad. We have two
756 * tables to look through, free_regions[] which holds free
757 * regions from inside pmap_mem_regions[0], and the others...
758 * pmap_mem_regions[1..]
759 */
760
761 /* current_free_region indicates the next free entry,
762 * if it's less than free_regions_count, then we're still
763 * in free_regions, otherwise we're in pmap_mem_regions
764 */
765
766 if (current_free_region >= free_regions_count) {
767 /* We're into the pmap_mem_regions, handle this
768 * separately to free_regions
769 */
770
771 int current_pmap_mem_region = current_free_region -
772 free_regions_count + 1;
773 if (current_pmap_mem_region > pmap_mem_regions_count)
774 return FALSE;
775 *addrp = avail_next;
776 avail_next += PAGE_SIZE;
777 avail_remaining--;
778 if (avail_next >= pmap_mem_regions[current_pmap_mem_region].end) {
779 current_free_region++;
780 current_pmap_mem_region++;
781 avail_next = pmap_mem_regions[current_pmap_mem_region].start;
782#if DEBUG
783 kprintf("pmap_next_page : next region start=0x%08x\n",avail_next);
784#endif /* DEBUG */
785 }
786 return TRUE;
787 }
788
789 /* We're in the free_regions, allocate next page and increment
790 * counters
791 */
792 *addrp = avail_next;
793
794 avail_next += PAGE_SIZE;
795 avail_remaining--;
796
797 if (avail_next >= free_regions[current_free_region].end) {
798 current_free_region++;
799 if (current_free_region < free_regions_count)
800 avail_next = free_regions[current_free_region].start;
801 else
802 avail_next = pmap_mem_regions[current_free_region -
803 free_regions_count + 1].start;
804#if DEBUG
805 kprintf("pmap_next_page : next region start=0x%08x\n",avail_next);
806#endif
807 }
808 return TRUE;
809}
810
811void pmap_virtual_space(
812 vm_offset_t *startp,
813 vm_offset_t *endp)
814{
815 *startp = round_page(first_free_virt);
816 *endp = VM_MAX_KERNEL_ADDRESS;
817}
818
819/*
820 * pmap_create
821 *
822 * Create and return a physical map.
823 *
824 * If the size specified for the map is zero, the map is an actual physical
825 * map, and may be referenced by the hardware.
826 *
827 * A pmap is either in the free list or in the in-use list. The only use
828 * of the in-use list (aside from debugging) is to handle the VSID wrap situation.
 829 * Whenever a new pmap is allocated (i.e., not recovered from the free list), the
 830 * in-use list is searched until a hole in the VSID sequence is found. (Note
831 * that the in-use pmaps are queued in VSID sequence order.) This is all done
832 * while free_pmap_lock is held.
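 * For example, if the in-use list (kept in spaceNum order) holds 5, 6, 8 and
 * cursor_pmap has spaceNum 5, the search advances past 6 (still in sequence)
 * and stops at the pmap with spaceNum 8, so 7 is the hole that gets used; the
 * actual VSID assigned is then (7 * incrVSID) & SID_MAX.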
833 *
834 * If the size specified is non-zero, the map will be used in software
835 * only, and is bounded by that size.
836 */
837pmap_t
838pmap_create(vm_size_t size)
839{
840 pmap_t pmap, ckpmap, fore, aft;
841 int s, i;
842 space_t sid;
843 unsigned int currSID;
844
845#if PMAP_LOWTRACE
846 dbgTrace(0xF1D00001, size, 0); /* (TEST/DEBUG) */
847#endif
848
849#if DEBUG
850 if (pmdebug & PDB_USER)
851 kprintf("pmap_create(size=%x)%c", size, size ? '\n' : ' ');
852#endif
853
854 /*
855 * A software use-only map doesn't even need a pmap structure.
856 */
857 if (size)
858 return(PMAP_NULL);
859
860 /*
861 * If there is a pmap in the pmap free list, reuse it.
862 * Note that we use free_pmap_list for all chaining of pmaps, both to
863 * the free list and the in use chain (anchored from kernel_pmap).
864 */
865 s = splhigh();
866 simple_lock(&free_pmap_lock);
867
868 if(free_pmap_list) { /* Any free? */
869 pmap = free_pmap_list; /* Yes, allocate it */
870 free_pmap_list = (pmap_t)pmap->bmaps; /* Dequeue this one (we chain free ones through bmaps) */
871 free_pmap_count--;
872 }
873 else {
874 simple_unlock(&free_pmap_lock); /* Unlock just in case */
875 splx(s);
876
877 pmap = (pmap_t) zalloc(pmap_zone); /* Get one */
878 if (pmap == PMAP_NULL) return(PMAP_NULL); /* Handle out-of-memory condition */
879
880 bzero((char *)pmap, pmapSize); /* Clean up the pmap */
881
882 s = splhigh();
883 simple_lock(&free_pmap_lock); /* Lock it back up */
884
885 ckpmap = cursor_pmap; /* Get starting point for free ID search */
886 currSID = ckpmap->spaceNum; /* Get the actual space ID number */
887
888 while(1) { /* Keep trying until something happens */
889
890 currSID = (currSID + 1) & SID_MAX; /* Get the next in the sequence */
891 ckpmap = (pmap_t)ckpmap->pmap_link.next; /* On to the next in-use pmap */
892
893 if(ckpmap->spaceNum != currSID) break; /* If we are out of sequence, this is free */
894
895 if(ckpmap == cursor_pmap) { /* See if we have 2^20 already allocated */
896 panic("pmap_create: Maximum number (2^20) active address spaces reached\n"); /* Die pig dog */
897 }
898 }
899
900 pmap->space = (currSID * incrVSID) & SID_MAX; /* Calculate the actual VSID */
901 pmap->spaceNum = currSID; /* Set the space ID number */
902
903/*
904 * Now we link into the chain just before the out of sequence guy.
905 */
906
907 fore = (pmap_t)ckpmap->pmap_link.prev; /* Get the current's previous */
908 pmap->pmap_link.next = (queue_t)ckpmap; /* My next points to the current */
909 fore->pmap_link.next = (queue_t)pmap; /* Current's previous's next points to me */
910 pmap->pmap_link.prev = (queue_t)fore; /* My prev points to what the current pointed to */
911 ckpmap->pmap_link.prev = (queue_t)pmap; /* Current's prev points to me */
912
913 simple_lock_init(&pmap->lock, ETAP_VM_PMAP);
914 pmap->pmapvr = (unsigned int)pmap ^ (unsigned int)pmap_extract(kernel_pmap, (vm_offset_t)pmap); /* Get physical pointer to the pmap and make mask */
915 }
916 pmap->ref_count = 1;
917 pmap->stats.resident_count = 0;
918 pmap->stats.wired_count = 0;
919 pmap->bmaps = 0; /* Clear block map pointer to 0 */
920 pmap->vflags = 0; /* Mark all alternates invalid for now */
921 for(i=0; i < 128; i++) { /* Clean out usage slots */
922 pmap->pmapUsage[i] = 0;
923 }
924 for(i=0; i < 16; i++) { /* Initialize for laughs */
925 pmap->pmapSegs[i] = SEG_REG_PROT | (i << 20) | pmap->space;
926 }
927
928#if PMAP_LOWTRACE
929 dbgTrace(0xF1D00002, (unsigned int)pmap, (unsigned int)pmap->space); /* (TEST/DEBUG) */
930#endif
931
932#if DEBUG
933 if (pmdebug & PDB_USER)
934 kprintf("-> %x, space id = %d\n", pmap, pmap->space);
935#endif
936
937 simple_unlock(&free_pmap_lock);
938 splx(s);
939 return(pmap);
940}
941
942/*
943 * pmap_destroy
944 *
945 * Gives up a reference to the specified pmap. When the reference count
946 * reaches zero the pmap structure is added to the pmap free list.
947 *
948 * Should only be called if the map contains no valid mappings.
949 */
950void
951pmap_destroy(pmap_t pmap)
952{
953 int ref_count;
954 spl_t s;
955 pmap_t fore, aft;
956
957#if PMAP_LOWTRACE
958 dbgTrace(0xF1D00003, (unsigned int)pmap, 0); /* (TEST/DEBUG) */
959#endif
960
961#if DEBUG
962 if (pmdebug & PDB_USER)
963 kprintf("pmap_destroy(pmap=%x)\n", pmap);
964#endif
965
966 if (pmap == PMAP_NULL)
967 return;
968
969 ref_count=hw_atomic_sub(&pmap->ref_count, 1); /* Back off the count */
970 if(ref_count>0) return; /* Still more users, leave now... */
971
972 if(ref_count < 0) /* Did we go too far? */
973 panic("pmap_destroy(): ref_count < 0");
974
975#ifdef notdef
976 if(pmap->stats.resident_count != 0)
977 panic("PMAP_DESTROY: pmap not empty");
978#else
979 if(pmap->stats.resident_count != 0) {
980 pmap_remove(pmap, 0, 0xFFFFF000);
981 }
982#endif
983
984 /*
985 * Add the pmap to the pmap free list.
986 */
987
988 s = splhigh();
992 simple_lock(&free_pmap_lock);
993
994 if (free_pmap_count <= free_pmap_max) { /* Do we have enough spares? */
995
996 pmap->bmaps = (struct blokmap *)free_pmap_list; /* Queue in front */
997 free_pmap_list = pmap;
998 free_pmap_count++;
999 simple_unlock(&free_pmap_lock);
1000
1001 } else {
1002 if(cursor_pmap == pmap) cursor_pmap = (pmap_t)pmap->pmap_link.prev; /* If we are releasing the cursor, back up */
1003 fore = (pmap_t)pmap->pmap_link.prev;
1004 aft = (pmap_t)pmap->pmap_link.next;
1005 fore->pmap_link.next = pmap->pmap_link.next; /* My previous's next is my next */
1006 aft->pmap_link.prev = pmap->pmap_link.prev; /* My next's previous is my previous */
1007 simple_unlock(&free_pmap_lock);
1008 zfree(pmap_zone, (vm_offset_t) pmap);
1009 }
1010 splx(s);
1011}
1012
1013/*
1014 * pmap_reference(pmap)
1015 * gains a reference to the specified pmap.
1016 */
1017void
1018pmap_reference(pmap_t pmap)
1019{
1020 spl_t s;
1021
1022#if PMAP_LOWTRACE
1023 dbgTrace(0xF1D00004, (unsigned int)pmap, 0); /* (TEST/DEBUG) */
1024#endif
1025
1026#if DEBUG
1027 if (pmdebug & PDB_USER)
1028 kprintf("pmap_reference(pmap=%x)\n", pmap);
1029#endif
1030
1031 if (pmap != PMAP_NULL) hw_atomic_add(&pmap->ref_count, 1); /* Bump the count */
1032}
1033
1034/*
1035 * pmap_remove_some_phys
1036 *
1037 * Removes mappings of the associated page from the specified pmap
1038 *
1039 */
1040void pmap_remove_some_phys(
1041 pmap_t pmap,
1042 vm_offset_t pa)
1043{
1044 register struct phys_entry *pp;
1045 register struct mapping *mp, *mpv;
1046
1047
1048 if (pmap == PMAP_NULL) return; /* Do nothing if no pmap */
1049
1050 pp = pmap_find_physentry(pa); /* Get the physent for this page */
1051 if (pp == PHYS_NULL) return; /* Leave if not in physical RAM */
1052
1053 mapping_purge_pmap(pp, pmap);
1054
1055 return; /* Leave... */
1056}
1057
1058/*
1059 * pmap_remove(pmap, s, e)
1060 * unmaps all virtual addresses v in the virtual address
1061 * range determined by [s, e) and pmap.
1062 * s and e must be on machine independent page boundaries and
1063 * s must be less than or equal to e.
1064 *
1065 * Note that pmap_remove does not remove any mappings in nested pmaps. We just
1066 * skip those segments.
1067 */
1068void
1069pmap_remove(
1070 pmap_t pmap,
1071 vm_offset_t sva,
1072 vm_offset_t eva)
1073{
1074 spl_t spl;
1075 struct mapping *mp, *blm;
1076 vm_offset_t lpage;
1077
1078#if PMAP_LOWTRACE
1079 dbgTrace(0xF1D00005, (unsigned int)pmap, sva|((eva-sva)>>12)); /* (TEST/DEBUG) */
1080#endif
1081
1082#if DEBUG
1083 if (pmdebug & PDB_USER)
1084 kprintf("pmap_remove(pmap=%x, sva=%x, eva=%x)\n",
1085 pmap, sva, eva);
1086#endif
1087
1088 if (pmap == PMAP_NULL)
1089 return;
1090
1091 /* It is just possible that eva might have wrapped around to zero,
1092 * and sometimes we get asked to liberate something of size zero
1093 * even though it's dumb (eg. after zero length read_overwrites)
1094 */
1095 assert(eva >= sva);
1096
1097 /* If these are not page aligned the loop might not terminate */
1098 assert((sva == trunc_page(sva)) && (eva == trunc_page(eva)));
1099
1100 /* We liberate addresses from high to low, since the stack grows
1101 * down. This means that we won't need to test addresses below
1102 * the limit of stack growth
1103 */
1104
1105 debugLog2(44, sva, eva); /* Log pmap_map call */
1106
1107 sva = trunc_page(sva); /* Make it clean */
1108 lpage = trunc_page(eva) - PAGE_SIZE; /* Point to the last page contained in the range */
1109
1110/*
1111 * Here we will remove all of the block mappings that overlap this range.
1112 * hw_rem_blk removes one mapping in the range and returns. If it returns
1113 * 0, there are no blocks in the range.
1114 */
1115
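/*
 * Judging from the checks below, hw_rem_blk tags the low two bits of the
 * returned (physical) mapping pointer: bit 0 set means the block is permanent
 * and must not be unmapped, bit 1 set means the mapping structure itself
 * should not be released here.
 */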
1116 while(mp = (mapping *)hw_rem_blk(pmap, sva, lpage)) { /* Keep going until no more */
1117 if((unsigned int)mp & 1) { /* Make sure we don't unmap a permanent one */
 1118 blm = (struct mapping *)hw_cpv((mapping *)((unsigned int)mp & 0xFFFFFFFC)); /* Get virtual address */
1119 panic("mapping_remove: attempt to unmap a permanent mapping - pmap = %08X, va = %08X, mapping = %08X\n",
1120 pmap, sva, blm);
1121 }
1122 if (!((unsigned int)mp & 2))
1123 mapping_free(hw_cpv(mp)); /* Release it */
 1124 }
1125 while (pmap->stats.resident_count && (eva > sva)) {
1126
1127 eva -= PAGE_SIZE; /* Back up a page */
1128
1129#if 1
1130 if((0x00008000 >> (sva >> 28)) & pmap->vflags)
1131 panic("pmap_remove: attempt to remove nested vaddr; pmap = %08X, vaddr = %08X\n", pmap, sva); /* (TEST/DEBUG) panic */
1132#endif
1133 if(!(pmap->pmapUsage[(eva >> pmapUsageShft) & pmapUsageMask])) { /* See if this chunk has anything in it */
1134 eva = eva & (-pmapUsageSize); /* Back up into the previous slot */
1135 continue; /* Check the next... */
1136 }
1137 mapping_remove(pmap, eva); /* Remove the mapping for this address */
1138 }
1139
1140 debugLog2(45, 0, 0); /* Log pmap_map call */
1141}
1142
1143/*
1144 * Routine:
1145 * pmap_page_protect
1146 *
1147 * Function:
1148 * Lower the permission for all mappings to a given page.
1149 */
1150void
1151pmap_page_protect(
1152 vm_offset_t pa,
1153 vm_prot_t prot)
1154{
1155 register struct phys_entry *pp;
1156 boolean_t remove;
1157
1158
1159#if PMAP_LOWTRACE
1160 dbgTrace(0xF1D00006, (unsigned int)pa, (unsigned int)prot); /* (TEST/DEBUG) */
1161#endif
1162
1163#if DEBUG
1164 if (pmdebug & PDB_USER)
1165 kprintf("pmap_page_protect(pa=%x, prot=%x)\n", pa, prot);
1166#endif
1167
1168 debugLog2(46, pa, prot); /* Log pmap_page_protect call */
1169
1170 switch (prot) {
1171 case VM_PROT_READ:
1172 case VM_PROT_READ|VM_PROT_EXECUTE:
1173 remove = FALSE;
1174 break;
1175 case VM_PROT_ALL:
1176 return;
1177 default:
1178 remove = TRUE;
1179 break;
1180 }
1181
1182 pp = pmap_find_physentry(pa); /* Get the physent for this page */
1183 if (pp == PHYS_NULL) return; /* Leave if not in physical RAM */
1184
1185 if (remove) { /* If the protection was set to none, we'll remove all mappings */
1186 mapping_purge(pp); /* Get rid of them all */
1187
1188 debugLog2(47, 0, 0); /* Log pmap_map call */
1189 return; /* Leave... */
1190 }
1191
1192 /* When we get here, it means that we are to change the protection for a
1193 * physical page.
1194 */
1195
1196 mapping_protect_phys(pp, prot, 0); /* Change protection of all mappings to page. */
1197
1198 debugLog2(47, 1, 0); /* Log pmap_map call */
1199}
1200
1201/*
1202 * pmap_protect(pmap, s, e, prot)
1203 * changes the protection on all virtual addresses v in the
1204 * virtual address range determined by [s, e] and pmap to prot.
1205 * s and e must be on machine independent page boundaries and
1206 * s must be less than or equal to e.
1207 *
1208 * Note that any requests to change the protection of a nested pmap are
1209 * ignored. Those changes MUST be done by calling this with the correct pmap.
1210 */
1211void pmap_protect(
1212 pmap_t pmap,
1213 vm_offset_t sva,
1214 vm_offset_t eva,
1215 vm_prot_t prot)
1216{
1217 spl_t spl;
1218 register struct phys_entry *pp;
1219 register struct mapping *mp, *mpv;
1220
1221#if PMAP_LOWTRACE
1222 dbgTrace(0xF1D00008, (unsigned int)pmap, (unsigned int)(sva|((eva-sva)>>12))); /* (TEST/DEBUG) */
1223#endif
1224
1225#if DEBUG
1226 if (pmdebug & PDB_USER)
1227 kprintf("pmap_protect(pmap=%x, sva=%x, eva=%x, prot=%x)\n", pmap, sva, eva, prot);
1228
1229 assert(sva < eva);
1230#endif
1231
1232 if (pmap == PMAP_NULL) return; /* Do nothing if no pmap */
1233
1234 debugLog2(48, sva, eva); /* Log pmap_map call */
1235
1236 if (prot == VM_PROT_NONE) { /* Should we kill the address range?? */
1237 pmap_remove(pmap, sva, eva); /* Yeah, dump 'em */
1238
1239 debugLog2(49, prot, 0); /* Log pmap_map call */
1240
1241 return; /* Leave... */
1242 }
1243
1244 sva = trunc_page(sva); /* Start up a page boundary */
1245
1246 while(sva < eva) { /* Step through */
1247
1248 if(!(pmap->pmapUsage[(sva >> pmapUsageShft) & pmapUsageMask])) { /* See if this chunk has anything in it */
1249 sva = (sva + pmapUsageSize) &(-pmapUsageSize); /* Jump up into the next slot if nothing here */
1250 if(!sva) break; /* We tried to wrap, kill loop... */
1251 continue; /* Check the next... */
1252 }
1253
1254#if 1
1255 if((0x00008000 >> (sva >> 28)) & pmap->vflags)
1256 panic("pmap_protect: attempt to protect nested vaddr; pmap = %08X, vaddr = %08X\n", pmap, sva); /* (TEST/DEBUG) panic */
1257#endif
1258
1259 mapping_protect(pmap, sva, prot); /* Change the protection on the page */
1260 sva += PAGE_SIZE; /* On to the next page */
1261 }
1262
1263 debugLog2(49, prot, 1); /* Log pmap_map call */
1264 return; /* Leave... */
1265}
1266
1267
1268
1269/*
1270 * pmap_enter
1271 *
1272 * Create a translation for the virtual address (virt) to the physical
1273 * address (phys) in the pmap with the protection requested. If the
1274 * translation is wired then we can not allow a full page fault, i.e.,
1275 * the mapping control block is not eligible to be stolen in a low memory
1276 * condition.
1277 *
1278 * NB: This is the only routine which MAY NOT lazy-evaluate
1279 * or lose information. That is, this routine must actually
1280 * insert this page into the given map NOW.
1281 */
1282void
1283pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
1284 unsigned int flags, boolean_t wired)
1285{
1286 spl_t spl;
1287 struct mapping *mp;
1288 struct phys_entry *pp;
1289 int memattr;
1290
1291#if PMAP_LOWTRACE
1292 dbgTrace(0xF1D00009, (unsigned int)pmap, (unsigned int)va); /* (TEST/DEBUG) */
1293 dbgTrace(0xF1D04009, (unsigned int)pa, (unsigned int)prot); /* (TEST/DEBUG) */
1294#endif
1295
1296 if (pmap == PMAP_NULL) return; /* If they gave us no pmap, just leave... */
1297
1298 debugLog2(50, va, pa); /* Log pmap_map call */
1299
1300 pp = pmap_find_physentry(pa); /* Get the physent for this physical page */
1301
1302 if((0x00008000 >> (va >> 28)) & pmap->vflags)
1303 panic("pmap_enter: attempt to map into nested vaddr; pmap = %08X, vaddr = %08X\n", pmap, va); /* (TEST/DEBUG) panic */
1304
1305 spl=splhigh(); /* Have to disallow interrupts between the
1306 time we possibly clear a mapping and the time
1307 we get it remapped again. An I/O SLIH could
1308 try to drive an IOR using the page before
1309 we get it mapped (Dude! This was a tough
1310 bug!!!!) */
1311
1312 mapping_remove(pmap, va); /* Remove any other mapping at this address */
1313
1314 if(flags & VM_WIMG_USE_DEFAULT) {
1315 if(pp) {
1316 /* Set attr to the phys default */
1317 memattr = ((pp->pte1&0x00000078) >> 3);
1318 } else {
1319 memattr = PTE_WIMG_UNCACHED_COHERENT_GUARDED;
1320 }
1321 } else {
1322 memattr = flags & VM_WIMG_MASK;
1323 }
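	/* (The WIMG bits, write-through, caching-inhibited, memory-coherent,
	 * guarded, live in bits 3 through 6 of the low PTE word, hence the
	 * 0x78 mask and the shift by 3 above.)
	 */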
 1324
 1325
1326 /* Make the address mapping */
1327 mp=mapping_make(pmap, pp, va, pa, prot, memattr, 0);
1328
1329 splx(spl); /* I'm not busy no more - come what may */
1330
1331 debugLog2(51, prot, 0); /* Log pmap_map call */
1332
1333#if DEBUG
1334 if (pmdebug & (PDB_USER|PDB_ENTER))
1335 kprintf("leaving pmap_enter\n");
1336#endif
1337
1338}
1339
1340/*
1341 * pmap_extract(pmap, va)
1342 * returns the physical address corrsponding to the
1343 * virtual address specified by pmap and va if the
1344 * virtual address is mapped and 0 if it is not.
1345 */
1346vm_offset_t pmap_extract(pmap_t pmap, vm_offset_t va) {
1347
1348 spl_t spl;
1349 register struct mapping *mp, *mpv;
1350 register vm_offset_t pa;
1351 unsigned int seg;
1352 pmap_t actpmap;
1353
1354
1355#if PMAP_LOWTRACE
1356 dbgTrace(0xF1D0000B, (unsigned int)pmap, (unsigned int)va); /* (TEST/DEBUG) */
1357#endif
1358#if DEBUG
1359 if (pmdebug & PDB_USER)
1360 kprintf("pmap_extract(pmap=%x, va=%x)\n", pmap, va);
1361#endif
1362
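	/* The top four bits of the address select one of the 16 PowerPC segments.
	 * pmap->vflags keeps a bit per segment (segment 0 in the 0x8000 bit,
	 * segment 15 in the 0x0001 bit); a set bit marks a nested pmap, which we
	 * then look up in pmapPmaps[seg].
	 */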
1363 seg = va >> 28; /* Isolate segment */
1364 if((0x00008000 >> seg) & pmap->vflags) actpmap = pmap->pmapPmaps[seg]; /* Set nested pmap if there is one */
1365 else actpmap = pmap; /* Otherwise use the one passed in */
1366
1367 pa = (vm_offset_t) 0; /* Clear this to 0 */
1368
1369 debugLog2(52, actpmap->space, va); /* Log pmap_map call */
1370
1371 spl = splhigh(); /* We can't allow any loss of control here */
1372
1373 if(mp=hw_lock_phys_vir(actpmap->space, va)) { /* Find the mapping for this vaddr and lock physent */
1374 if((unsigned int)mp&1) { /* Did the lock on the phys entry time out? */
1375 panic("pmap_extract: timeout obtaining lock on physical entry\n"); /* Scream bloody murder! */
1376 splx(spl); /* Interruptions are cool now */
1377 return 0;
1378 }
1379
1380 mpv = hw_cpv(mp); /* Get virtual address of mapping */
1381 pa = (vm_offset_t)((mpv->PTEr & -PAGE_SIZE) | ((unsigned int)va & (PAGE_SIZE-1))); /* Build the physical address */
1382 if(mpv->physent) hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the physical entry */
1383 splx(spl); /* Interruptions are cool now */
1384
1385 debugLog2(53, pa, 0); /* Log pmap_map call */
1386
1387 return pa; /* Return the physical address... */
1388 }
1389
1390 pa = hw_cvp_blk(pmap, va); /* Try to convert odd-sized page (returns 0 if not found) */
1391 /* Note no nested pmaps here */
1392 splx(spl); /* Restore 'rupts */
1393 debugLog2(53, pa, 0); /* Log pmap_map call */
1394 return pa; /* Return physical address or 0 */
1395}
1396
1397/*
1398 * pmap_attribute_cache_sync
 1399 * Handle the machine attribute calls which involve syncing the processor
1400 * cache.
1401 */
1402kern_return_t
1403pmap_attribute_cache_sync(address, size, attribute, value)
1404 vm_offset_t address;
1405 vm_size_t size;
1406 vm_machine_attribute_t attribute;
1407 vm_machine_attribute_val_t* value;
1408{
1409 while(size) {
1410 switch (*value) { /* What type was that again? */
1411 case MATTR_VAL_CACHE_SYNC: /* It is sync I+D caches */
1412 sync_cache(address, PAGE_SIZE); /* Sync up dem caches */
1413 break; /* Done with this one here... */
1414
1415 case MATTR_VAL_CACHE_FLUSH: /* It is flush from all caches */
1416 flush_dcache(address, PAGE_SIZE, TRUE); /* Flush out the data cache */
1417 invalidate_icache(address,
1418 PAGE_SIZE, TRUE); /* Flush out the instruction cache */
1419 break; /* Done with this one here... */
1420
1421 case MATTR_VAL_DCACHE_FLUSH: /* It is flush from data cache(s) */
1422 flush_dcache(address, PAGE_SIZE, TRUE); /* Flush out the data cache */
1423 break; /* Done with this one here... */
1424
1425 case MATTR_VAL_ICACHE_FLUSH: /* It is flush from instr cache(s) */
1426 invalidate_icache(address,
1427 PAGE_SIZE, TRUE); /* Flush out the instruction cache */
1428 break; /* Done with this one here... */
1429 }
1430 size -= PAGE_SIZE;
1431 }
 1432 return KERN_SUCCESS;
1433}
1434
1435
1436/*
1437 * pmap_attributes:
1438 *
1439 * Set/Get special memory attributes; Set is not implemented.
1440 *
1441 * Note: 'VAL_GET_INFO' is used to return info about a page.
1442 * If less than 1 page is specified, return the physical page
1443 * mapping and a count of the number of mappings to that page.
1444 * If more than one page is specified, return the number
1445 * of resident pages and the number of shared (more than
1446 * one mapping) pages in the range;
1447 *
1448 */
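/*
 * As coded below, the single-page case packs the physical page address and the
 * mapping count into one word (address in the page-aligned high bits, count in
 * the low bits), while the multi-page case adds 65536 per resident page and 1
 * per shared page, i.e. resident count in the upper halfword and shared count
 * in the lower halfword of *value.
 */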
1449kern_return_t
1450pmap_attribute(pmap, address, size, attribute, value)
1451 pmap_t pmap;
1452 vm_offset_t address;
1453 vm_size_t size;
1454 vm_machine_attribute_t attribute;
1455 vm_machine_attribute_val_t* value;
1456{
1457 spl_t s;
1458 vm_offset_t sva, eva;
1459 vm_offset_t pa;
1460 kern_return_t ret;
1461 register struct mapping *mp, *mpv;
1462 register struct phys_entry *pp;
1463 int total, seg;
1464 pmap_t actpmap;
1465
1466 if (attribute != MATTR_CACHE)
1467 return KERN_INVALID_ARGUMENT;
1468
1469 /* We can't get the caching attribute for more than one page
1470 * at a time
1471 */
1472 if ((*value == MATTR_VAL_GET) &&
1473 (trunc_page(address) != trunc_page(address+size-1)))
1474 return KERN_INVALID_ARGUMENT;
1475
1476 if (pmap == PMAP_NULL)
1477 return KERN_SUCCESS;
1478
1479 sva = trunc_page(address);
1480 eva = round_page(address + size);
1481 ret = KERN_SUCCESS;
1482
1483 debugLog2(54, address, attribute); /* Log pmap_map call */
1484
1485 switch (*value) {
1486 case MATTR_VAL_CACHE_SYNC: /* sync I+D caches */
1487 case MATTR_VAL_CACHE_FLUSH: /* flush from all caches */
1488 case MATTR_VAL_DCACHE_FLUSH: /* flush from data cache(s) */
1489 case MATTR_VAL_ICACHE_FLUSH: /* flush from instr cache(s) */
1490 sva = trunc_page(sva);
1491 s = splhigh();
1492
1493 while (sva < eva) {
1494 seg = sva >> 28; /* Isolate segment */
1495 if((0x00008000 >> seg) & pmap->vflags) actpmap = pmap->pmapPmaps[seg]; /* Set nested pmap if there is one */
1496 else actpmap = pmap; /* Otherwise use the one passed in */
1497
1498/*
 1499 * Note: the following should work OK with nested pmaps because there are no overlaid mappings
1500 */
1501 if(!(actpmap->pmapUsage[(sva >> pmapUsageShft) & pmapUsageMask])) { /* See if this chunk has anything in it */
1502 sva = (sva + pmapUsageSize) & (-pmapUsageSize); /* Jump up into the next slot if nothing here */
1503 if(!sva) break; /* We tried to wrap, kill loop... */
1504 continue; /* Check the next... */
1505 }
1506
1507 if(!(mp = hw_lock_phys_vir(actpmap->space, sva))) { /* Find the mapping for this vaddr and lock physent */
1508 sva += PAGE_SIZE; /* Point to the next page */
1509 continue; /* Skip if the page is not mapped... */
1510 }
1511
1512 if((unsigned int)mp&1) { /* Did the lock on the phys entry time out? */
1513 panic("pmap_attribute: timeout obtaining lock on physical entry\n"); /* Scream bloody murder! */
1514 continue;
1515 }
1516
1517 mpv = hw_cpv(mp); /* Get virtual address of mapping */
1518 if((unsigned int)mpv->physent) { /* Is there a physical entry? */
1519 pa = (vm_offset_t)mpv->physent->pte1 & -PAGE_SIZE; /* Yes, get the physical address from there */
1520 }
1521 else {
 1522 pa = (vm_offset_t)(mpv->PTEr & -PAGE_SIZE); /* Otherwise from the mapping */
1523 }
1524
1525 switch (*value) { /* What type was that again? */
1526 case MATTR_VAL_CACHE_SYNC: /* It is sync I+D caches */
1527 sync_cache(pa, PAGE_SIZE); /* Sync up dem caches */
1528 break; /* Done with this one here... */
1529
1530 case MATTR_VAL_CACHE_FLUSH: /* It is flush from all caches */
1531 flush_dcache(pa, PAGE_SIZE, TRUE); /* Flush out the data cache */
1532 invalidate_icache(pa, PAGE_SIZE, TRUE); /* Flush out the instruction cache */
1533 break; /* Done with this one here... */
1534
1535 case MATTR_VAL_DCACHE_FLUSH: /* It is flush from data cache(s) */
1536 flush_dcache(pa, PAGE_SIZE, TRUE); /* Flush out the data cache */
1537 break; /* Done with this one here... */
1538
1539 case MATTR_VAL_ICACHE_FLUSH: /* It is flush from instr cache(s) */
1540 invalidate_icache(pa, PAGE_SIZE, TRUE); /* Flush out the instruction cache */
1541 break; /* Done with this one here... */
1542 }
1543 if(mpv->physent) hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the physical entry if it exists*/
1544
1545 sva += PAGE_SIZE; /* Point to the next page */
1546 }
1547 splx(s);
1548 break;
1549
1550 case MATTR_VAL_GET_INFO: /* Get info */
1551 total = 0;
1552 s = splhigh(); /* Lock 'em out */
1553
1554 if (size <= PAGE_SIZE) { /* Do they want just one page */
1555 seg = sva >> 28; /* Isolate segment */
1556 if((0x00008000 >> seg) & pmap->vflags) actpmap = pmap->pmapPmaps[seg]; /* Set nested pmap if there is one */
1557 else actpmap = pmap; /* Otherwise use the one passed in */
1558 if(!(mp = hw_lock_phys_vir(actpmap->space, sva))) { /* Find the mapping for this vaddr and lock physent */
1559 *value = 0; /* Return nothing if no mapping */
1560 }
1561 else {
1562 if((unsigned int)mp&1) { /* Did the lock on the phys entry time out? */
1563 panic("pmap_attribute: timeout obtaining lock on physical entry\n"); /* Scream bloody murder! */
1564 }
1565 mpv = hw_cpv(mp); /* Get virtual address of mapping */
1566 if(pp = mpv->physent) { /* Check for a physical entry */
1567 total = 0; /* Clear the count */
 1568 for (mpv = (mapping *)hw_cpv((mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS)); mpv != NULL; mpv = hw_cpv(mpv->next)) total++; /* Count the mapping */
1569 *value = (vm_machine_attribute_val_t) ((pp->pte1 & -PAGE_SIZE) | total); /* Pass back the physical address and the count of mappings */
1570 hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* Clear the physical entry lock */
1571 }
1572 else { /* This is the case for an I/O mapped area */
1573 *value = (vm_machine_attribute_val_t) ((mpv->PTEr & -PAGE_SIZE) | 1); /* Pass back the physical address and the count of mappings */
1574 }
1575 }
1576 }
1577 else {
1578 total = 0;
1579 while (sva < eva) {
1580 seg = sva >> 28; /* Isolate segment */
1581 if((0x00008000 >> seg) & pmap->vflags) actpmap = pmap->pmapPmaps[seg]; /* Set nested pmap if there is one */
1582 else actpmap = pmap; /* Otherwise use the one passed in */
1583
1584 if(!(actpmap->pmapUsage[(sva >> pmapUsageShft) & pmapUsageMask])) { /* See if this chunk has anything in it */
1585 sva = (sva + pmapUsageSize) & (-pmapUsageSize); /* Jump up into the next slot if nothing here */
1586 if(!sva) break; /* We tried to wrap, kill loop... */
1587 continue; /* Check the next... */
1588 }
1589 if(mp = hw_lock_phys_vir(actpmap->space, sva)) { /* Find the mapping for this vaddr and lock physent */
1590 if((unsigned int)mp&1) { /* Did the lock on the phys entry time out? */
1591 panic("pmap_attribute: timeout obtaining lock on physical entry\n"); /* Scream bloody murder! */
1592 continue;
1593 }
1594 mpv = hw_cpv(mp); /* Get virtual address of mapping */
1595 total += 65536 + (mpv->physent && ((mapping *)((unsigned int)mpv->physent->phys_link & -32))->next); /* Count the "resident" and shared pages */
1596 hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Clear the physical entry lock */
1597 }
1598 sva += PAGE_SIZE;
1599 }
1600 *value = total;
1601 }
1602 splx(s);
1603 break;
1604
1605 case MATTR_VAL_GET: /* return current value */
1606 case MATTR_VAL_OFF: /* turn attribute off */
1607 case MATTR_VAL_ON: /* turn attribute on */
1608 default:
1609 ret = KERN_INVALID_ARGUMENT;
1610 break;
1611 }
1612
1613 debugLog2(55, 0, 0); /* Log pmap_map call */
1614
1615 return ret;
1616}
1617
1618/*
1619 * pmap_sync_caches_phys(vm_offset_t pa)
1620 *
1621 * Invalidates all of the instruction cache on a physical page and
1622 * pushes any dirty data from the data cache for the same physical page
1623 */
1624
1625void pmap_sync_caches_phys(vm_offset_t pa) {
1626
1627 spl_t s;
1628
1629 s = splhigh(); /* No interruptions here */
1630 sync_cache(trunc_page(pa), PAGE_SIZE); /* Sync up dem caches */
1631 splx(s); /* Allow interruptions */
1632 return;
1633}
1634
1635/*
1636 * pmap_collect
1637 *
1638 * Garbage collects the physical map system for pages that are no longer used.
1639 * It isn't implemented or needed or wanted.
1640 */
1641void
1642pmap_collect(pmap_t pmap)
1643{
1644 return;
1645}
1646
1647/*
1648 * Routine: pmap_activate
1649 * Function:
1650 * Binds the given physical map to the given
1651 * processor, and returns a hardware map description.
1652 * It isn't implemented or needed or wanted.
1653 */
1654void
1655pmap_activate(
1656 pmap_t pmap,
1657 thread_t th,
1658 int which_cpu)
1659{
1660 return;
1661}
1662/*
1663 * pmap_deactivate:
1664 * It isn't implemented or needed or wanted.
1665 */
1666void
1667pmap_deactivate(
1668 pmap_t pmap,
1669 thread_t th,
1670 int which_cpu)
1671{
1672 return;
1673}
1674
1675#if DEBUG
1676
1677/*
1678 * pmap_zero_page
1679 * pmap_copy_page
1680 *
1681 * are implemented in movc.s, these
1682 * are just wrappers to help debugging
1683 */
1684
1685extern void pmap_zero_page_assembler(vm_offset_t p);
1686extern void pmap_copy_page_assembler(vm_offset_t src, vm_offset_t dst);
1687
1688/*
1689 * pmap_zero_page(pa)
1690 *
1691 * pmap_zero_page zeros the specified (machine independent) page pa.
1692 */
1693void
1694pmap_zero_page(
1695 vm_offset_t p)
1696{
1697 register struct mapping *mp;
1698 register struct phys_entry *pp;
1699
1700 if (pmdebug & (PDB_USER|PDB_ZERO))
1701 kprintf("pmap_zero_page(pa=%x)\n", p);
1702
1703 /*
1704 * XXX can these happen?
1705 */
1706 if (pmap_find_physentry(p) == PHYS_NULL)
1707 panic("zero_page: physaddr out of range");
1708
1709 pmap_zero_page_assembler(p);
1710}
1711
1712/*
1713 * pmap_copy_page(src, dst)
1714 *
1715 * pmap_copy_page copies the specified (machine independent)
1716 * page from physical address src to physical address dst.
1717 *
1718 * We need to invalidate the cache for address dst before
1719 * we do the copy. Apparently there won't be any mappings
1720 * to the dst address normally.
1721 */
1722void
1723pmap_copy_page(
1724 vm_offset_t src,
1725 vm_offset_t dst)
1726{
1727 register struct phys_entry *pp;
1728
1729 if (pmdebug & (PDB_USER|PDB_COPY))
1730 kprintf("pmap_copy_page(spa=%x, dpa=%x)\n", src, dst);
1731 if (pmdebug & PDB_COPY)
1732 kprintf("pmap_copy_page: phys_copy(%x, %x, %x)\n",
1733 src, dst, PAGE_SIZE);
1734
1735 pmap_copy_page_assembler(src, dst);
1736}
1737#endif /* DEBUG */
1738
1739/*
1740 * pmap_pageable(pmap, s, e, pageable)
1741 * Make the specified pages (by pmap, offset)
1742 * pageable (or not) as requested.
1743 *
1744 * A page which is not pageable may not take
1745 * a fault; therefore, its page table entry
1746 * must remain valid for the duration.
1747 *
1748 * This routine is merely advisory; pmap_enter()
1749 * will specify that these pages are to be wired
1750 * down (or not) as appropriate.
1751 *
1752 * (called from vm/vm_fault.c).
1753 */
1754void
1755pmap_pageable(
1756 pmap_t pmap,
1757 vm_offset_t start,
1758 vm_offset_t end,
1759 boolean_t pageable)
1760{
1761
1762 return; /* This is not used... */
1763
1764}
1765/*
1766 * Routine: pmap_change_wiring
1767 * NOT USED ANYMORE.
1768 */
1769void
1770pmap_change_wiring(
1771 register pmap_t pmap,
1772 vm_offset_t va,
1773 boolean_t wired)
1774{
1775 return; /* This is not used... */
1776}
1777
1778/*
1779 * pmap_modify_pages(pmap, s, e)
1780 * sets the modified bit on all virtual addresses v in the
1781 * virtual address range determined by [s, e] and pmap.
1782 * s and e must be on machine independent page boundaries, and
1783 * s must be less than or equal to e.
1784 */
1785void
1786pmap_modify_pages(
1787 pmap_t pmap,
1788 vm_offset_t sva,
1789 vm_offset_t eva)
1790{
1791 spl_t spl;
1792 mapping *mp;
1793
1794#if PMAP_LOWTRACE
1795 dbgTrace(0xF1D00010, (unsigned int)pmap, (unsigned int)(sva|((eva-sva)>>12))); /* (TEST/DEBUG) */
1796#endif
1797
1798#if DEBUG
1799 if (pmdebug & PDB_USER) kprintf("pmap_modify_pages(pmap=%x, sva=%x, eva=%x)\n", pmap, sva, eva);
1800#endif
1801
1802 if (pmap == PMAP_NULL) return; /* If no pmap, can't do it... */
1803
1804 	debugLog2(56, sva, eva); /* Log pmap_modify_pages call */
1805
1806 spl=splhigh(); /* Don't bother me */
1807
1808 for ( ; sva < eva; sva += PAGE_SIZE) { /* Cycle through the whole range */
1809 mp = hw_lock_phys_vir(pmap->space, sva); /* Lock the physical entry for this mapping */
1810 if(mp) { /* Did we find one? */
1811 if((unsigned int)mp&1) { /* Did the lock on the phys entry time out? */
1812 panic("pmap_modify_pages: timeout obtaining lock on physical entry\n"); /* Scream bloody murder! */
1813 continue;
1814 }
1815 mp = hw_cpv(mp); /* Convert to virtual addressing */
1816 if(!mp->physent) continue; /* No physical entry means an I/O page, we can't set attributes */
1817 			mapping_set_mod(mp->physent); /* Set the modified bit for this page */
1818 hw_unlock_bit((unsigned int *)&mp->physent->phys_link, PHYS_LOCK); /* Unlock the physical entry */
1819 }
1820 }
1821 splx(spl); /* Restore the interrupt level */
1822
1823 	debugLog2(57, 0, 0); /* Log pmap_modify_pages exit */
1824 return; /* Leave... */
1825}
1826
1827/*
1828 * pmap_clear_modify(phys)
1829 * clears the hardware modified ("dirty") bit for one
1830 * machine independent page starting at the given
1831 * physical address. phys must be aligned on a machine
1832 * independent page boundary.
1833 */
1834void
1835pmap_clear_modify(vm_offset_t pa)
1836{
1837 register struct phys_entry *pp;
1838 spl_t spl;
1839
1840#if PMAP_LOWTRACE
1841 dbgTrace(0xF1D00011, (unsigned int)pa, 0); /* (TEST/DEBUG) */
1842#endif
1843#if DEBUG
1844 if (pmdebug & PDB_USER)
1845 kprintf("pmap_clear_modify(pa=%x)\n", pa);
1846#endif
1847
1848 pp = pmap_find_physentry(pa); /* Find the physent for this page */
1849 if (pp == PHYS_NULL) return; /* If there isn't one, just leave... */
1850
1851 	debugLog2(58, pa, 0); /* Log pmap_clear_modify call */
1852
1853 spl=splhigh(); /* Don't bother me */
1854
1855 if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Try to get the lock on the physical entry */
1856 panic("pmap_clear_modify: Timeout getting lock on physent at %08X\n", pp); /* Arrrgghhhh! */
1857 splx(spl); /* Restore 'rupts */
1858 return; /* Should die before here */
1859 }
1860
1861 mapping_clr_mod(pp); /* Clear all change bits for physical page */
1862
1863 hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* Unlock the physical entry */
1864 splx(spl); /* Restore the interrupt level */
1865
1866 	debugLog2(59, 0, 0); /* Log pmap_clear_modify exit */
1867}
1868
1869/*
1870 * pmap_is_modified(phys)
1871 * returns TRUE if the given physical page has been modified
1872 * since the last call to pmap_clear_modify().
1873 */
1874boolean_t
1875pmap_is_modified(register vm_offset_t pa)
1876{
1877 register struct phys_entry *pp;
1878 spl_t spl;
1879 boolean_t ret;
1880
1881
1882#if PMAP_LOWTRACE
1883 dbgTrace(0xF1D00012, (unsigned int)pa, 0); /* (TEST/DEBUG) */
1884#endif
1885#if DEBUG
1886 if (pmdebug & PDB_USER)
1887 kprintf("pmap_is_modified(pa=%x)\n", pa);
1888#endif
1889
1890 pp = pmap_find_physentry(pa); /* Find the physent for this page */
1891 if (pp == PHYS_NULL) return(FALSE); /* Just indicate not set... */
1892
1893 	debugLog2(60, pa, 0); /* Log pmap_is_modified call */
1894
1895 spl=splhigh(); /* Don't bother me */
1896
1897 if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Try to get the lock on the physical entry */
1898 panic("pmap_is_modified: Timeout getting lock on physent at %08X\n", pp); /* Arrrgghhhh! */
1899 splx(spl); /* Restore 'rupts */
1900 return 0; /* Should die before here */
1901 }
1902
1903 ret = mapping_tst_mod(pp); /* Check for modified */
1904
1905 hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* Unlock the physical entry */
1906 splx(spl); /* Restore the interrupt level */
1907
1908 	debugLog2(61, ret, 0); /* Log pmap_is_modified exit */
1909
1910 return ret;
1911}
1912
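#if 0	/* Illustrative sketch only; not part of the build */
/*
 * Rough illustration of how a pageout-style consumer might use the modify
 * bit (the policy and helper name are hypothetical, not the VM system's
 * actual code path): a page dirtied since the last clean must be written
 * back before it can be reused.
 */
static boolean_t
example_page_needs_cleaning(vm_offset_t pa)
{
	if (!pmap_is_modified(pa))		/* Untouched since the last pmap_clear_modify()? */
		return FALSE;			/* Then it can be reclaimed as-is */
	pmap_clear_modify(pa);			/* Reset the change bits... */
	return TRUE;				/* ...and tell the caller to push the page to backing store */
}
#endif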
1913/*
1914 * pmap_clear_reference(phys)
1915 * clears the hardware referenced bit in the given machine
1916 * independent physical page.
1917 *
1918 */
1919void
1920pmap_clear_reference(vm_offset_t pa)
1921{
1922 register struct phys_entry *pp;
1923 spl_t spl;
1924
1925
1926#if PMAP_LOWTRACE
1927 dbgTrace(0xF1D00013, (unsigned int)pa, 0); /* (TEST/DEBUG) */
1928#endif
1929#if DEBUG
1930 if (pmdebug & PDB_USER)
1931 kprintf("pmap_clear_reference(pa=%x)\n", pa);
1932#endif
1933
1934 pp = pmap_find_physentry(pa); /* Find the physent for this page */
1935 if (pp == PHYS_NULL) return; /* If there isn't one, just leave... */
1936
1937 	debugLog2(62, pa, 0); /* Log pmap_clear_reference call */
1938
1939 spl=splhigh(); /* Don't bother me */
1940 mapping_clr_ref(pp); /* Clear all reference bits for physical page */
1941 splx(spl); /* Restore the interrupt level */
1942
1943 	debugLog2(63, 0, 0); /* Log pmap_clear_reference exit */
1944
1945}
1946
1947/*
1948 * pmap_is_referenced(phys)
1949 * returns TRUE if the given physical page has been referenced
1950 * since the last call to pmap_clear_reference().
1951 */
1952boolean_t
1953pmap_is_referenced(vm_offset_t pa)
1954{
1955 register struct phys_entry *pp;
1956 spl_t spl;
1957 boolean_t ret;
1958
1959
1960#if PMAP_LOWTRACE
1961 dbgTrace(0xF1D00014, (unsigned int)pa, 0); /* (TEST/DEBUG) */
1962#endif
1963#if DEBUG
1964 if (pmdebug & PDB_USER)
1965 kprintf("pmap_is_referenced(pa=%x)\n", pa);
1966#endif
1967
1968 pp = pmap_find_physentry(pa); /* Find the physent for this page */
1969 if (pp == PHYS_NULL) return(FALSE); /* Just indicate not set... */
1970
1971 	debugLog2(64, pa, 0); /* Log pmap_is_referenced call */
1972
1973 spl=splhigh(); /* Don't bother me */
1974
1975 if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Try to get the lock on the physical entry */
1976 panic("pmap_is_referenced: Timeout getting lock on physent at %08X\n", pp); /* Arrrgghhhh! */
1977 splx(spl); /* Restore 'rupts */
1978 return 0; /* Should die before here */
1979 }
1980
1981 ret = mapping_tst_ref(pp); /* Check for referenced */
1982
1983 hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* Unlock the physical entry */
1984 splx(spl); /* Restore the interrupt level */
1985
1986 	debugLog2(65, ret, 0); /* Log pmap_is_referenced exit */
1987
1988 return ret;
1989}
1990
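#if 0	/* Illustrative sketch only; not part of the build */
/*
 * Rough illustration of reference-bit aging (again a hypothetical policy,
 * not the actual pageout scan): a page touched since the last pass gets
 * another round on the active list; an untouched page becomes a reclaim
 * candidate.
 */
static boolean_t
example_page_is_reclaim_candidate(vm_offset_t pa)
{
	if (pmap_is_referenced(pa)) {		/* Touched since the last scan? */
		pmap_clear_reference(pa);	/* Clear the bit so the next scan starts fresh */
		return FALSE;			/* Keep it active for now */
	}
	return TRUE;				/* Cold page - a candidate for reclaim */
}
#endif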
1991#if MACH_VM_DEBUG
1992int
1993pmap_list_resident_pages(
1994 register pmap_t pmap,
1995 register vm_offset_t *listp,
1996 register int space)
1997{
1998 return 0;
1999}
2000#endif /* MACH_VM_DEBUG */
2001
2002/*
2003 * Locking:
2004 * spl: VM
2005 */
2006void
2007pmap_copy_part_page(
2008 vm_offset_t src,
2009 vm_offset_t src_offset,
2010 vm_offset_t dst,
2011 vm_offset_t dst_offset,
2012 vm_size_t len)
2013{
2014 register struct phys_entry *pp_src, *pp_dst;
2015 spl_t s;
2016
2017
2018#if PMAP_LOWTRACE
2019 dbgTrace(0xF1D00019, (unsigned int)src+src_offset, (unsigned int)dst+dst_offset); /* (TEST/DEBUG) */
2020 dbgTrace(0xF1D04019, (unsigned int)len, 0); /* (TEST/DEBUG) */
2021#endif
2022 s = splhigh();
2023
2024 assert(((dst & PAGE_MASK)+dst_offset+len) <= PAGE_SIZE);
2025 assert(((src & PAGE_MASK)+src_offset+len) <= PAGE_SIZE);
2026
2027 /*
2028 * Since the source and destination are physical addresses,
2029 * turn off data translation and perform the copy with phys_copy().
2030 */
2031 phys_copy((vm_offset_t) src+src_offset,
2032 (vm_offset_t) dst+dst_offset, len);
2033
2034 splx(s);
2035}
2036
2037void
2038pmap_zero_part_page(
2039 vm_offset_t p,
2040 vm_offset_t offset,
2041 vm_size_t len)
2042{
2043 panic("pmap_zero_part_page");
2044}
2045
2046boolean_t pmap_verify_free(vm_offset_t pa) {
2047
2048 struct phys_entry *pp;
2049
2050#if PMAP_LOWTRACE
2051 dbgTrace(0xF1D00007, (unsigned int)pa, 0); /* (TEST/DEBUG) */
2052#endif
2053
2054#if DEBUG
2055 if (pmdebug & PDB_USER)
2056 kprintf("pmap_verify_free(pa=%x)\n", pa);
2057#endif
2058
2059 if (!pmap_initialized) return(TRUE);
2060
2061 pp = pmap_find_physentry(pa); /* Look up the physical entry */
2062 	pp = pmap_find_physentry(pa); /* Look up the physical entry */
2062 	if (pp == PHYS_NULL) return FALSE; /* If there isn't a physent, the page isn't managed; report not free... */
2063 	return ((mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS) == MAPPING_NULL); /* Otherwise, return TRUE only if no mappings exist... */
2064}
2065
2066
2067/* Determine if we need to switch space and set up for it if so */
2068
2069void pmap_switch(pmap_t map)
2070{
2071 unsigned int i;
2072
2073#if DEBUG
2074 if (watchacts & WA_PCB) {
2075 kprintf("Switching to map at 0x%08x, space=%d\n",
2076 map,map->space);
2077 }
2078#endif /* DEBUG */
2079
2080
2081/* When changing to kernel space, don't bother
2082 * doing anything; the kernel is already mapped from here.
2083 */
2084 if (map->space == PPC_SID_KERNEL) { /* Are we switching into kernel space? */
2085 return; /* If so, we don't do anything... */
2086 }
2087
2088 hw_set_user_space(map); /* Indicate if we need to load the SRs or not */
2089 return; /* Bye, bye, butterfly... */
2090}
2091
2092/*
2093 * kern_return_t pmap_nest(grand, subord, vaddr, size)
2094 *
2095 * grand = the pmap that we will nest subord into
2096 * subord = the pmap that goes into the grand
2097 * vaddr = start of range in pmap to be inserted
2098 * size = size of range in pmap to be inserted
2099 *
2100 * Inserts a pmap into another. This is used to implement shared segments.
2101 * On the current PPC processors, this is limited to segment-aligned,
2102 * segment-sized (256MB) ranges.
2103 */
2104
2105kern_return_t pmap_nest(pmap_t grand, pmap_t subord, vm_offset_t vaddr, vm_size_t size) {
2106
2107 unsigned int oflags, seg, grandr;
2108 int i;
2109
2110 if(size != 0x10000000) return KERN_INVALID_VALUE; /* We can only do this for 256MB for now */
2111 if(vaddr & 0x0FFFFFFF) return KERN_INVALID_VALUE; /* We can only do this aligned to 256MB */
2112
2113 while(1) { /* Test and set the subordinate flag */
2114 oflags = subord->vflags & ~pmapAltSeg; /* Get old unset value */
2115 		if(subord->vflags & pmapAltSeg) { /* Are we trying to nest one that is already nested? */
2116 panic("pmap_nest: Attempt to nest an already nested pmap\n");
2117 }
2118 if(hw_compare_and_store(oflags, oflags | pmapSubord, &subord->vflags)) break; /* Done if we got it set */
2119 }
2120
2121 simple_lock(&grand->lock); /* Lock the superior pmap */
2122
2123 if(grand->vflags & pmapSubord) { /* Are we only one level deep? */
2124 simple_unlock(&grand->lock); /* Unlock the superior pmap */
2125 panic("pmap_nest: Attempt to nest into subordinate pmap\n");
2126 return KERN_FAILURE; /* Shame on you */
2127 }
2128
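	/*
	 * Note on the test below: the low-order 16 bits of vflags serve as a
	 * per-segment in-use bitmap, one bit for each of the sixteen 256MB
	 * segments of the address space, with segment 0 at mask 0x00008000 and
	 * segment 15 at 0x00000001.  The same "(0x00008000 >> seg)" test is
	 * used wherever a nested segment is looked up or torn down.
	 */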
2129 seg = vaddr >> 28; /* Isolate the segment number */
2130 if((0x00008000 >> seg) & grand->vflags) { /* See if it is already in use */
2131 simple_unlock(&grand->lock); /* Unlock the superior pmap */
2132 panic("pmap_nest: Attempt to nest into already nested segment\n");
2133 return KERN_FAILURE; /* Shame on you */
2134 }
2135
2136 grand->pmapPmaps[seg] = subord; /* Set the pointer to the subordinate */
2137 grand->pmapSegs[seg] = SEG_REG_PROT | (seg << 20) | subord->space; /* Set the vsid to the subordinate's vsid */
2138 grand->vflags |= (0x00008000 >> seg); /* Set in-use bit */
2139
2140 grandr = (unsigned int)grand ^ grand->pmapvr; /* Get real address of the grand pmap */
2141
2142 simple_unlock(&grand->lock); /* Unlock the grand pmap */
2143
2144
2145/*
2146 * Note that the following will force the segment registers to be reloaded following
2147 * the next interrupt on all processors if they are using the pmap we just changed.
2148 *
2149 */
2150
2151
2152 for(i=0; i < real_ncpus; i++) { /* Cycle through processors */
2153 		(void)hw_compare_and_store((unsigned int)grandr, 0, &per_proc_info[i].Lastpmap); /* Clear if ours */
2154 }
2155
2156 return KERN_SUCCESS; /* Bye, bye, butterfly... */
2157}
2158
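#if 0	/* Illustrative sketch only; not part of the build */
/*
 * Minimal example of nesting a shared submap into a task pmap.  The pmap
 * names and the base address are hypothetical; the only hard requirements,
 * per the checks above, are that the range be exactly one 256MB segment
 * and that it be 256MB aligned, otherwise KERN_INVALID_VALUE comes back.
 */
static kern_return_t
example_nest_shared_segment(pmap_t task_pmap, pmap_t shared_pmap)
{
	return pmap_nest(task_pmap, shared_pmap,
	    0x70000000,				/* Hypothetical segment-aligned base address */
	    0x10000000);			/* Exactly one 256MB segment */
}
#endif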
2159
2160/*
2161 * kern_return_t pmap_unnest(grand, vaddr, size)
2162 *
2163 * grand = the pmap from which the nested pmap will be removed
2164 * vaddr = start of the range to be removed
2165 * size = size of the range to be removed
2166 *
2167 * Removes a pmap from another. This is used to implement shared segments.
2168 * On the current PPC processors, this is limited to segment-aligned,
2169 * segment-sized (256MB) ranges.
2170 */
2171
2172kern_return_t pmap_unnest(pmap_t grand, vm_offset_t vaddr, vm_size_t size) {
2173
2174 unsigned int oflags, seg, grandr, tstamp;
2175 int i, tcpu, mycpu;
2176
2177 if(size != 0x10000000) return KERN_INVALID_VALUE; /* We can only do this for 256MB for now */
2178 if(vaddr & 0x0FFFFFFF) return KERN_INVALID_VALUE; /* We can only do this aligned to 256MB */
2179
2180 simple_lock(&grand->lock); /* Lock the superior pmap */
2181 disable_preemption(); /* It's all for me! */
2182
2183 seg = vaddr >> 28; /* Isolate the segment number */
2184 	if(!((0x00008000 >> seg) & grand->vflags)) { /* Make sure this segment is actually nested */
2185 enable_preemption(); /* Ok, your turn */
2186 simple_unlock(&grand->lock); /* Unlock the superior pmap */
2187 panic("pmap_unnest: Attempt to unnest an unnested segment\n");
2188 return KERN_FAILURE; /* Shame on you */
2189 }
2190
2191 grand->pmapPmaps[seg] = (pmap_t)0; /* Clear the pointer to the subordinate */
2192 	grand->pmapSegs[seg] = grand->space; /* Reset to the grand's space (overwritten with the full vsid below) */
2193 	grand->pmapSegs[seg] = SEG_REG_PROT | (seg << 20) | grand->space; /* Set the vsid back to the grand's vsid */
2194 grand->vflags &= ~(0x00008000 >> seg); /* Clear in-use bit */
2195
2196 grandr = (unsigned int)grand ^ grand->pmapvr; /* Get real address of the grand pmap */
2197
2198 simple_unlock(&grand->lock); /* Unlock the superior pmap */
2199
2200/*
2201 * Note that the following will force the segment registers to be reloaded
2202 * on all processors (if they are using the pmap we just changed) before returning.
2203 *
2204 * This is needed. The reason is that until the segment register is
2205 * reloaded, another thread in the same task on a different processor will
2206 * be able to access memory that it isn't allowed to anymore. That can happen
2207 * because access to the subordinate pmap is being removed, but the pmap is still
2208 * valid.
2209 *
2210 * Note that we only kick the other processor if we see that it was using the pmap while we
2211 * were changing it.
2212 */
2213
2214
2215 mycpu = cpu_number(); /* Who am I? Am I just a dream? */
2216 for(i=0; i < real_ncpus; i++) { /* Cycle through processors */
2217 		if(hw_compare_and_store((unsigned int)grandr, 0, &per_proc_info[i].Lastpmap)) { /* Clear if ours and kick the other guy if he was using it */
2218 if(i == mycpu) continue; /* Don't diddle ourselves */
2219 tstamp = per_proc_info[i].ruptStamp[1]; /* Save the processor's last interrupt time stamp */
2220 if(cpu_signal(i, SIGPwake, 0, 0) != KERN_SUCCESS) { /* Make sure we see the pmap change */
2221 continue;
2222 			}
2223 			if(!hw_cpu_wcng(&per_proc_info[i].ruptStamp[1], tstamp, LockTimeOut)) { /* Wait for the other processor to take the interruption */
2224 panic("pmap_unnest: Other processor (%d) did not see interruption request\n", i);
2225 }
2226 }
2227 }
2228
2229 enable_preemption(); /* Others can run now */
2230 return KERN_SUCCESS; /* Bye, bye, butterfly... */
2231}
2232
2233
2234void pmap_ver(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) {
2235
2236 int cnt, i, j, k;
2237 vm_offset_t xx;
2238
2239 if(!pmap) return;
2240
2241 sva = trunc_page(sva);
2242 eva = trunc_page(eva);
2243
2244 for(i = 0; i < (pmapUsageMask + 1); i++) { /* Step through them all */
2245 if((pmap->pmapUsage[i]) > 8192) { /* See if this is a sane number */
2246 panic("pmap_ver: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
2247 i * pmapUsageSize, pmap->pmapUsage[i], pmap);
2248 }
2249 }
2250 j = 0;
2251 while(1) { /* Try multiple times */
2252 cnt = 0;
2253 for(i = 0; i < (pmapUsageMask + 1); i++) { /* Step through them all */
2254 cnt = cnt + pmap->pmapUsage[i]; /* Sum all slots */
2255 }
2256 if(cnt == pmap->stats.resident_count) break; /* We're ok if we match... */
2257
2258 j++;
2259 for(i = 0; i < 100000; i++) {
2260 k = j + i;
2261 }
2262 if(j >= 10) {
2263 panic("pmap_ver: pmapUsage total (%d) does not match resident count (%d) for pmap %08X\n",
2264 cnt, pmap->stats.resident_count, pmap);
2265 }
2266 }
2267
2268 for(xx = sva; xx < eva; xx += PAGE_SIZE) { /* See if any slots not clear */
2269 if(pmap_extract(pmap, xx)) {
2270 panic("pmap_ver: range (%08X to %08X) not empty at %08X for pmap %08X\n",
2271 sva, eva, xx, pmap);
2272 }
2273 }
2274}
2275
2276
2277/* temporary workaround */
2278boolean_t
2279coredumpok(vm_map_t map, vm_offset_t va)
2280{
2281 return TRUE;
2282}
2283