osfmk/ppc/pmap.c (apple/xnu, xnu-344.32)
1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22/*
23 * @OSF_COPYRIGHT@
24 */
25/*
26 * Mach Operating System
27 * Copyright (c) 1990,1991,1992 The University of Utah and
28 * the Center for Software Science (CSS).
29 * Copyright (c) 1991,1987 Carnegie Mellon University.
30 * All rights reserved.
31 *
32 * Permission to use, copy, modify and distribute this software and its
33 * documentation is hereby granted, provided that both the copyright
34 * notice and this permission notice appear in all copies of the
35 * software, derivative works or modified versions, and any portions
36 * thereof, and that both notices appear in supporting documentation,
37 * and that all advertising materials mentioning features or use of
38 * this software display the following acknowledgement: ``This product
39 * includes software developed by the Center for Software Science at
40 * the University of Utah.''
41 *
42 * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF
43 * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
44 * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
45 * THIS SOFTWARE.
46 *
47 * CSS requests users of this software to return to css-dist@cs.utah.edu any
48 * improvements that they make and grant CSS redistribution rights.
49 *
50 * Carnegie Mellon requests users of this software to return to
51 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
52 * School of Computer Science
53 * Carnegie Mellon University
54 * Pittsburgh PA 15213-3890
55 * any improvements or extensions that they make and grant Carnegie Mellon
56 * the rights to redistribute these changes.
57 *
58 * Utah $Hdr: pmap.c 1.28 92/06/23$
59 * Author: Mike Hibler, Bob Wheeler, University of Utah CSS, 10/90
60 */
61
62/*
63 * Manages physical address maps for powerpc.
64 *
65 * In addition to hardware address maps, this
66 * module is called upon to provide software-use-only
67 * maps which may or may not be stored in the same
68 * form as hardware maps. These pseudo-maps are
69 * used to store intermediate results from copy
70 * operations to and from address spaces.
71 *
72 * Since the information managed by this module is
73 * also stored by the logical address mapping module,
74 * this module may throw away valid virtual-to-physical
75 * mappings at almost any time. However, invalidations
76 * of virtual-to-physical mappings must be done as
77 * requested.
78 *
79 * In order to cope with hardware architectures which
80 * make virtual-to-physical map invalidates expensive,
81 * this module may delay invalidation or protection-reduction
82 * operations until such time as they are actually
83 * necessary. This module is given full information as
84 * to when physical maps must be made correct.
85 *
86 */
87
88#include <zone_debug.h>
89#include <cpus.h>
90#include <debug.h>
91#include <mach_kgdb.h>
92#include <mach_vm_debug.h>
93#include <db_machine_commands.h>
94
95#include <kern/thread.h>
96#include <kern/simple_lock.h>
97#include <mach/vm_attributes.h>
98#include <mach/vm_param.h>
99#include <kern/spl.h>
100
101#include <kern/misc_protos.h>
102#include <ppc/misc_protos.h>
103#include <ppc/proc_reg.h>
104
105#include <vm/pmap.h>
106#include <vm/vm_map.h>
107#include <vm/vm_page.h>
108
109#include <ppc/pmap.h>
110#include <ppc/pmap_internals.h>
111#include <ppc/mem.h>
112#include <ppc/mappings.h>
113
114#include <ppc/new_screen.h>
115#include <ppc/Firmware.h>
116#include <ppc/savearea.h>
117#include <ppc/exception.h>
118#include <ddb/db_output.h>
119
120#if DB_MACHINE_COMMANDS
121/* optionally enable traces of pmap operations in post-mortem trace table */
122/* #define PMAP_LOWTRACE 1 */
123#define PMAP_LOWTRACE 0
124#else /* DB_MACHINE_COMMANDS */
125/* Can not trace even if we wanted to */
126#define PMAP_LOWTRACE 0
127#endif /* DB_MACHINE_COMMANDS */
128
129#define PERFTIMES 0
130
131#if PERFTIMES && DEBUG
132#define debugLog2(a, b, c) dbgLog2(a, b, c)
133#else
134#define debugLog2(a, b, c)
135#endif
136
137extern unsigned int avail_remaining;
138extern unsigned int mappingdeb0;
139extern struct Saveanchor saveanchor; /* Aligned savearea anchor */
140extern int real_ncpus; /* Number of actual CPUs */
141unsigned int debugbackpocket; /* (TEST/DEBUG) */
142
143vm_offset_t avail_next;
144vm_offset_t first_free_virt;
145int current_free_region; /* Used in pmap_next_page */
146
147/* forward */
148void pmap_activate(pmap_t pmap, thread_t th, int which_cpu);
149void pmap_deactivate(pmap_t pmap, thread_t th, int which_cpu);
150void copy_to_phys(vm_offset_t sva, vm_offset_t dpa, int bytecount);
151
152#if MACH_VM_DEBUG
153int pmap_list_resident_pages(pmap_t pmap, vm_offset_t *listp, int space);
154#endif
155
156#if DEBUG
157#define PDB_USER 0x01 /* exported functions */
158#define PDB_MAPPING 0x02 /* low-level mapping routines */
159#define PDB_ENTER 0x04 /* pmap_enter specifics */
160#define PDB_COPY 0x08 /* copy page debugging */
161#define PDB_ZERO 0x10 /* zero page debugging */
162#define PDB_WIRED 0x20 /* things concerning wired entries */
163#define PDB_PTEG 0x40 /* PTEG overflows */
164#define PDB_LOCK 0x100 /* locks */
165#define PDB_IO 0x200 /* Improper use of WIMG_IO checks - PCI machines */
166
167int pmdebug=0;
168#endif
169
170/* NOTE: kernel_pmap_store must be in V=R storage and aligned!!!!!!!!!!!!!! */
171
172extern struct pmap kernel_pmap_store;
173pmap_t kernel_pmap; /* Pointer to kernel pmap and anchor for in-use pmaps */
174pmap_t cursor_pmap; /* Pointer to last pmap allocated or previous if removed from in-use list */
175struct zone *pmap_zone; /* zone of pmap structures */
176boolean_t pmap_initialized = FALSE;
177
178/*
179 * Physical-to-virtual translations are handled by inverted page table
180 * structures, phys_tables. Multiple mappings of a single page are handled
181 * by linking the affected mapping structures. We initialise one region
182 * for phys_tables of the physical memory we know about, but more may be
183 * added as it is discovered (eg. by drivers).
184 */
185struct phys_entry *phys_table; /* For debugging */
186
187lock_t pmap_system_lock;
188
189decl_simple_lock_data(,tlb_system_lock)
190
191/*
192 * free pmap list. caches the first free_pmap_max pmaps that are freed up
193 */
194int free_pmap_max = 32;
195int free_pmap_count;
196pmap_t free_pmap_list;
197decl_simple_lock_data(,free_pmap_lock)
198
199/*
200 * Function to get index into phys_table for a given physical address
201 */
202
203struct phys_entry *pmap_find_physentry(vm_offset_t pa)
204{
205 int i;
206 struct phys_entry *entry;
207
208 for (i = pmap_mem_regions_count-1; i >= 0; i--) {
209 if (pa < pmap_mem_regions[i].start)
210 continue;
211 if (pa >= pmap_mem_regions[i].end)
212 return PHYS_NULL;
213
214 entry = &pmap_mem_regions[i].phys_table[(pa - pmap_mem_regions[i].start) >> PPC_PGSHIFT];
215 __asm__ volatile("dcbt 0,%0" : : "r" (entry)); /* We will use this in a little bit */
216 return entry;
217 }
218 kprintf("DEBUG : pmap_find_physentry 0x%08x out of range\n",pa);
219 return PHYS_NULL;
1c79356b
A
220}
221
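/*
 * Editorial sketch (not part of the original source): one way a caller might use
 * pmap_find_physentry() together with the phys_link chain described above. The
 * chain walk is modelled on the MATTR_VAL_GET_INFO code in pmap_attribute() below;
 * physent locking is omitted here for brevity.
 */
#if 0	/* illustrative only */
static int count_mappings_of_page(vm_offset_t pa)
{
	struct phys_entry *pp;
	mapping *mpv;
	int count = 0;

	pp = pmap_find_physentry(pa);			/* Find the inverted-table entry for this page */
	if (pp == PHYS_NULL) return 0;			/* Not managed physical RAM */

	for (mpv = (mapping *)hw_cpv((mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS));
	     mpv != NULL; mpv = hw_cpv(mpv->next))	/* Every mapping of the page hangs off phys_link */
		count++;

	return count;
}
#endif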
222/*
223 * kern_return_t
224 * pmap_add_physical_memory(vm_offset_t spa, vm_offset_t epa,
225 * boolean_t available, unsigned int attr)
226 * Allocate some extra physentries for the physical addresses given,
227 * specifying a default attribute that, on the PowerPC, determines
228 * the default cacheability for any mappings using these addresses.
229 * If the memory is marked as available, it is added to the general
230 * VM pool, otherwise it is not (it is reserved for card IO etc).
231 */
232kern_return_t pmap_add_physical_memory(vm_offset_t spa, vm_offset_t epa,
233 boolean_t available, unsigned int attr)
234{
235 int i,j;
236 spl_t s;
237
238 /* Only map whole pages */
239
240 panic("Forget it! You can't map no more memory, you greedy puke!\n");
241
242 spa = trunc_page(spa);
243 epa = round_page(epa);
244
245 /* First check that the region doesn't already exist */
246
247 assert (epa >= spa);
248 for (i = 0; i < pmap_mem_regions_count; i++) {
249 /* If we're below the next region, then no conflict */
250 if (epa < pmap_mem_regions[i].start)
251 break;
252 if (spa < pmap_mem_regions[i].end) {
253#if DEBUG
254 kprintf("pmap_add_physical_memory(0x%08x,0x%08x,0x%08x) - memory already present\n",spa,epa,attr);
255#endif /* DEBUG */
256 return KERN_NO_SPACE;
257 }
258 }
259
260#if DEBUG
261 kprintf("pmap_add_physical_memory; region insert spot: %d out of %d\n", i, pmap_mem_regions_count); /* (TEST/DEBUG) */
262#endif
263
264 /* Check that we've got enough space for another region */
265 if (pmap_mem_regions_count == PMAP_MEM_REGION_MAX)
266 return KERN_RESOURCE_SHORTAGE;
267
268 /* Once here, i points to the mem_region above ours in physical mem */
269
270 /* allocate a new phys_table for this new region */
271#if DEBUG
272 kprintf("pmap_add_physical_memory; kalloc\n"); /* (TEST/DEBUG) */
273#endif
274
275 phys_table = (struct phys_entry *)
276 kalloc(sizeof(struct phys_entry) * atop(epa-spa));
277#if DEBUG
278 kprintf("pmap_add_physical_memory; new phys_table: %08X\n", phys_table); /* (TEST/DEBUG) */
279#endif
280
281 /* Initialise the new phys_table entries */
282 for (j = 0; j < atop(epa-spa); j++) {
283
284 phys_table[j].phys_link = MAPPING_NULL;
285
286 mapping_phys_init(&phys_table[j], spa+(j*PAGE_SIZE), attr); /* Initialize the hardware specific portions */
287
288 }
289 s = splhigh();
290
291 /* Move all the phys_table entries up some to make room in
292 * the ordered list.
293 */
294 for (j = pmap_mem_regions_count; j > i ; j--)
295 pmap_mem_regions[j] = pmap_mem_regions[j-1];
296
297 /* Insert a new entry with some memory to back it */
298
299 pmap_mem_regions[i].start = spa;
300 pmap_mem_regions[i].end = epa;
301 pmap_mem_regions[i].phys_table = phys_table;
302
303 pmap_mem_regions_count++;
304 splx(s);
305
306#if DEBUG
307 for(i=0; i<pmap_mem_regions_count; i++) { /* (TEST/DEBUG) */
308 kprintf("region %d: %08X %08X %08X\n", i, pmap_mem_regions[i].start,
309 pmap_mem_regions[i].end, pmap_mem_regions[i].phys_table); /* (TEST/DEBUG) */
310 }
311#endif
312
313 if (available) {
314 kprintf("warning : pmap_add_physical_mem() "
315 "available not yet supported\n");
316 }
317
318 return KERN_SUCCESS;
319}
320
321/*
322 * pmap_map(va, spa, epa, prot)
323 * is called during boot to map memory in the kernel's address map.
324 * A virtual address range starting at "va" is mapped to the physical
325 * address range "spa" to "epa" with machine independent protection
326 * "prot".
327 *
328 * "va", "spa", and "epa" are byte addresses and must be on machine
329 * independent page boundaries.
330 *
331 * The pages form a contiguous virtual address range with the same protection
332 * and attributes, so we map the whole range with a single block.
333 *
334 */
335vm_offset_t
336pmap_map(
337 vm_offset_t va,
338 vm_offset_t spa,
339 vm_offset_t epa,
340 vm_prot_t prot)
341{
342
343
344 if (spa == epa)
345 return(va);
346
347 assert(epa > spa);
348 debugLog2(40, va, spa); /* Log pmap_map call */
349
350 pmap_map_block(kernel_pmap, va, spa, epa - spa, prot, PTE_WIMG_DEFAULT, blkPerm); /* Set up a permanent block mapped area */
351
352 debugLog2(41, epa, prot); /* Log pmap_map call */
353
354 return(va);
355}
356
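/*
 * Editorial example (not part of the original source): pmap_bootstrap() below uses
 * pmap_map() in exactly this fashion to map its own tables V=R; the names here are
 * placeholders.
 */
#if 0	/* illustrative only */
	vm_offset_t va = pmap_map(phys_start,		/* V=R: virtual == physical */
				  phys_start, phys_end,
				  VM_PROT_READ | VM_PROT_WRITE);
#endif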
357/*
358 * pmap_map_bd(va, spa, epa, prot)
359 * Back-door routine for mapping kernel VM at initialisation.
360 * Used for mapping memory outside the known physical memory
361 * space, with caching disabled. Designed for use by device probes.
362 *
363 * A virtual address range starting at "va" is mapped to the physical
364 * address range "spa" to "epa" with machine independent protection
365 * "prot".
366 *
367 * "va", "spa", and "epa" are byte addresses and must be on machine
368 * independent page boundaries.
369 *
370 * WARNING: The current version of memcpy() can use the dcbz instruction
371 * on the destination addresses. This will cause an alignment exception
372 * and consequent overhead if the destination is caching-disabled. So
373 * avoid memcpy()ing into the memory mapped by this function.
374 *
375 * Also, many other pmap_ routines will misbehave if you try to change
376 * protections on or remove these mappings; they are designed to be permanent.
377 *
378 * These areas will be added to the autogen list, if possible. Existing translations
379 * are overridden and their mapping structures are released. This takes place in
380 * the autogen_map function.
381 *
382 * Locking:
383 * this routine is called only during system initialization when only
384 * one processor is active, so no need to take locks...
385 */
386vm_offset_t
387pmap_map_bd(
388 vm_offset_t va,
389 vm_offset_t spa,
390 vm_offset_t epa,
391 vm_prot_t prot)
392{
393 register struct mapping *mp;
394 register struct phys_entry *pp;
395
396
397 if (spa == epa)
398 return(va);
399
400 assert(epa > spa);
401
402 debugLog2(42, va, epa); /* Log pmap_map_bd call */
403
404 pmap_map_block(kernel_pmap, va, spa, epa - spa, prot, PTE_WIMG_IO, blkPerm); /* Set up autogen area */
405
406 debugLog2(43, epa, prot); /* Log pmap_map_bd exit */
407
408 return(va);
409}
410
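/*
 * Editorial sketch (not part of the original source) of the memcpy()/dcbz caveat
 * above: fill a region mapped by pmap_map_bd() with explicit word stores so no
 * cache-block-zero is ever issued against the caching-inhibited destination. The
 * names are placeholders.
 */
#if 0	/* illustrative only */
	volatile unsigned int *dst = (volatile unsigned int *)probe_va;	/* va returned by pmap_map_bd() */
	unsigned int *src = (unsigned int *)buffer;
	unsigned int i;

	for (i = 0; i < bytecount / sizeof(unsigned int); i++)
		dst[i] = src[i];	/* plain stores; memcpy() might dcbz the uncached destination */
#endif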
411/*
412 * Bootstrap the system enough to run with virtual memory.
413 * Map the kernel's code and data, and allocate the system page table.
414 * Called with mapping done by BATs. Page_size must already be set.
415 *
416 * Parameters:
417 * mem_size: Total memory present
418 * first_avail: First virtual address available
419 * first_phys_avail: First physical address available
420 */
421void
422pmap_bootstrap(unsigned int mem_size, vm_offset_t *first_avail, vm_offset_t *first_phys_avail, unsigned int kmapsize)
423{
424 register struct mapping *mp;
425 vm_offset_t addr;
426 vm_size_t size;
427 int i, num, j, rsize, mapsize, vmpagesz, vmmapsz;
428 unsigned int mask;
429 vm_offset_t first_used_addr;
430 PCA *pcaptr;
431
432 *first_avail = round_page(*first_avail);
433
434#if DEBUG
435 kprintf("first_avail=%08X; first_phys_avail=%08X; avail_remaining=%d\n",
436 *first_avail, *first_phys_avail, avail_remaining);
437#endif
438
439 assert(PAGE_SIZE == PPC_PGBYTES);
440
441 /*
442 * Initialize kernel pmap
443 */
444 kernel_pmap = &kernel_pmap_store;
445 cursor_pmap = &kernel_pmap_store;
446
447 lock_init(&pmap_system_lock,
448 FALSE, /* NOT a sleep lock */
449 ETAP_VM_PMAP_SYS,
450 ETAP_VM_PMAP_SYS_I);
451
452 simple_lock_init(&kernel_pmap->lock, ETAP_VM_PMAP_KERNEL);
453
454 kernel_pmap->pmap_link.next = (queue_t)kernel_pmap; /* Set up anchor forward */
455 kernel_pmap->pmap_link.prev = (queue_t)kernel_pmap; /* Set up anchor reverse */
456 kernel_pmap->ref_count = 1;
457 kernel_pmap->space = PPC_SID_KERNEL;
458 kernel_pmap->pmapvr = 0; /* Virtual = Real */
459 kernel_pmap->bmaps = 0; /* No block pages just yet */
460 for(i=0; i < 128; i++) { /* Clear usage slots */
461 kernel_pmap->pmapUsage[i] = 0;
462 }
463 for(i=0; i < 16; i++) { /* Initialize for laughs */
464 kernel_pmap->pmapSegs[i] = SEG_REG_PROT | (i << 20) | PPC_SID_KERNEL;
465 }
466
467 /*
468 * Allocate: (from first_avail up)
469 * Aligned to its own size:
470 * hash table (for mem size 2**x, allocate 2**(x-10) entries)
471 * mapping table (same size and immediately following hash table)
472 */
473 /* hash_table_size must be a power of 2, recommended sizes are
474 * taken from PPC601 User Manual, table 6-19. We take the next
475 * highest size if mem_size is not a power of two.
476 * TODO NMGS make this configurable at boot time.
477 */
478
479 num = sizeof(pte_t) * (mem_size >> 10);
480
481 for (hash_table_size = 64 * 1024; /* minimum size = 64Kbytes */
482 hash_table_size < num;
483 hash_table_size *= 2)
484 continue;
485
486 if (num > (sizeof(pte_t) * 524288))
487 hash_table_size = hash_table_size/2; /* reduce by half above 512MB */
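	/*
	 * Editorial worked example (not part of the original source), assuming an
	 * 8-byte pte_t: for mem_size = 128MB, num = 8 * (128MB >> 10) = 1MB, so the
	 * doubling loop above settles on a 1MB hash table; for mem_size = 1GB the
	 * loop yields 8MB and the "above 512MB" clamp halves it to 4MB.
	 */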
488
489 /* Scale to within any physical memory layout constraints */
490 do {
491 num = atop(mem_size); /* num now holds mem_size in pages */
492
493 /* size of all structures that we're going to allocate */
494
495 size = (vm_size_t) (
496 (InitialSaveBloks * PAGE_SIZE) + /* Allow space for the initial context saveareas */
497 ((InitialSaveBloks / 2) * PAGE_SIZE) + /* For backpocket saveareas */
498 hash_table_size + /* For hash table */
499 hash_table_size + /* For PTEG allocation table */
500 (num * sizeof(struct phys_entry)) /* For the physical entries */
501 );
502
503 mapsize = size = round_page(size); /* Get size of area to map that we just calculated */
504 mapsize = mapsize + kmapsize; /* Account for the kernel text size */
505
506 vmpagesz = round_page(num * sizeof(struct vm_page)); /* Allow for all vm_pages needed to map physical mem */
507 vmmapsz = round_page((num / 8) * sizeof(struct vm_map_entry)); /* Allow for vm_maps */
508
509 mapsize = mapsize + vmpagesz + vmmapsz; /* Add the VM system estimates into the grand total */
510
511 mapsize = mapsize + (4 * 1024 * 1024); /* Allow for 4 meg of extra mappings */
512 mapsize = ((mapsize / PAGE_SIZE) + MAPPERBLOK - 1) / MAPPERBLOK; /* Get number of blocks of mappings we need */
513 mapsize = mapsize + ((mapsize + MAPPERBLOK - 1) / MAPPERBLOK); /* Account for the mappings themselves */
514
515#if DEBUG
516 kprintf("pmap_bootstrap: initial vm_pages = %08X\n", vmpagesz);
517 kprintf("pmap_bootstrap: initial vm_maps = %08X\n", vmmapsz);
518 kprintf("pmap_bootstrap: size before mappings = %08X\n", size);
519 kprintf("pmap_bootstrap: kernel map size = %08X\n", kmapsize);
520 kprintf("pmap_bootstrap: mapping blocks rqrd = %08X\n", mapsize);
521#endif
522
523 size = size + (mapsize * PAGE_SIZE); /* Get the true size we need */
524
525 /* hash table must be aligned to its size */
526
527 addr = (*first_avail +
528 (hash_table_size-1)) & ~(hash_table_size-1);
529
530 if (addr + size > pmap_mem_regions[0].end) {
531 hash_table_size /= 2;
532 } else {
533 break;
534 }
535 /* If we have had to shrink hash table to too small, panic */
536 if (hash_table_size == 32 * 1024)
537 panic("cannot lay out pmap memory map correctly");
538 } while (1);
539
540#if DEBUG
541 kprintf("hash table size=%08X, total size of area=%08X, addr=%08X\n",
542 hash_table_size, size, addr);
543#endif
544 if (round_page(*first_phys_avail) < trunc_page(addr)) {
545 /* We are stepping over at least one page here, so
546 * add this region to the free regions so that it can
547 * be allocated by pmap_steal
548 */
549 free_regions[free_regions_count].start = round_page(*first_phys_avail);
550 free_regions[free_regions_count].end = trunc_page(addr);
551
552 avail_remaining += (free_regions[free_regions_count].end -
553 free_regions[free_regions_count].start) /
554 PPC_PGBYTES;
555#if DEBUG
556 kprintf("ADDED FREE REGION from 0x%08x to 0x%08x, avail_remaining = %d\n",
557 free_regions[free_regions_count].start,free_regions[free_regions_count].end,
558 avail_remaining);
559#endif /* DEBUG */
560 free_regions_count++;
561 }
562
563 /* Zero everything - this also invalidates the hash table entries */
564 bzero((char *)addr, size);
565
566 /* Set up some pointers to our new structures */
567
568 /* from here, addr points to the next free address */
569
570 first_used_addr = addr; /* remember where we started */
571
572 /* Set up hash table address and dma buffer address, keeping
573 * alignment. These mappings are all 1-1, so dma_r == dma_v
574 *
575 * If hash_table_size == dma_buffer_alignment, then put hash_table
576 * first, since dma_buffer_size may be smaller than alignment, but
577 * hash table alignment==hash_table_size.
578 */
579 hash_table_base = addr;
580
581 addr += hash_table_size;
582 addr += hash_table_size; /* Add another for the PTEG Control Area */
583 assert((hash_table_base & (hash_table_size-1)) == 0);
584
585 pcaptr = (PCA *)(hash_table_base+hash_table_size); /* Point to the PCA table */
586 mapCtl.mapcflush.pcaptr = pcaptr;
587
588 for(i=0; i < (hash_table_size/64) ; i++) { /* For all of PTEG control areas: */
589 pcaptr[i].flgs.PCAalflgs.PCAfree=0xFF; /* Mark all slots free */
590 pcaptr[i].flgs.PCAalflgs.PCAsteal=0x01; /* Initialize steal position */
591 }
592
593 savearea_init(&addr); /* Initialize the savearea chains and data */
594
595 /* phys_table is static to help debugging,
596 * this variable is no longer actually used
597 * outside of this scope
598 */
599
600 phys_table = (struct phys_entry *) addr;
601
602#if DEBUG
603 kprintf("hash_table_base =%08X\n", hash_table_base);
604 kprintf("phys_table =%08X\n", phys_table);
605 kprintf("pmap_mem_regions_count =%08X\n", pmap_mem_regions_count);
606#endif
607
608 for (i = 0; i < pmap_mem_regions_count; i++) {
609
610 pmap_mem_regions[i].phys_table = phys_table;
611 rsize = (pmap_mem_regions[i].end - (unsigned int)pmap_mem_regions[i].start)/PAGE_SIZE;
612
613#if DEBUG
614 kprintf("Initializing physical table for region %d\n", i);
615 kprintf(" table=%08X, size=%08X, start=%08X, end=%08X\n",
616 phys_table, rsize, pmap_mem_regions[i].start,
617 (unsigned int)pmap_mem_regions[i].end);
618#endif
619
620 for (j = 0; j < rsize; j++) {
621 phys_table[j].phys_link = MAPPING_NULL;
622 mapping_phys_init(&phys_table[j], (unsigned int)pmap_mem_regions[i].start+(j*PAGE_SIZE),
623 PTE_WIMG_DEFAULT); /* Initializes hw specific storage attributes */
624 }
625 phys_table = phys_table +
626 atop(pmap_mem_regions[i].end - pmap_mem_regions[i].start);
627 }
628
629 /* restore phys_table for debug */
630 phys_table = (struct phys_entry *) addr;
631
632 addr += sizeof(struct phys_entry) * num;
633
634 simple_lock_init(&tlb_system_lock, ETAP_VM_PMAP_TLB);
635
636 /* Initialise the registers necessary for supporting the hashtable */
637#if DEBUG
638 kprintf("*** hash_table_init: base=%08X, size=%08X\n", hash_table_base, hash_table_size);
639#endif
640
641 hash_table_init(hash_table_base, hash_table_size);
642
643/*
644 * Remaining space is for mapping entries. Tell the initializer routine that
645 * the mapping system can't release this block because it's permanently assigned
646 */
647
648 mapping_init(); /* Initialize the mapping tables */
649
650 for(i = addr; i < first_used_addr + size; i += PAGE_SIZE) { /* Add initial mapping blocks */
651 mapping_free_init(i, 1, 0); /* Pass block address and say that this one is not releasable */
652 }
653 mapCtl.mapcmin = MAPPERBLOK; /* Make sure we only adjust one at a time */
654
655#if DEBUG
656
657 kprintf("mapping kernel memory from 0x%08x to 0x%08x, to address 0x%08x\n",
658 first_used_addr, round_page(first_used_addr+size),
659 first_used_addr);
660#endif /* DEBUG */
661
662 /* Map V=R the page tables */
663 pmap_map(first_used_addr, first_used_addr,
664 round_page(first_used_addr+size), VM_PROT_READ | VM_PROT_WRITE);
665
666#if DEBUG
667
668 for(i=first_used_addr; i < round_page(first_used_addr+size); i+=PAGE_SIZE) { /* Step through all these mappings */
669 if(i != (j = kvtophys(i))) { /* Verify that the mapping was made V=R */
670 kprintf("*** V=R mapping failed to verify: V=%08X; R=%08X\n", i, j);
671 }
672 }
673#endif
674
675 *first_avail = round_page(first_used_addr + size);
676 first_free_virt = round_page(first_used_addr + size);
677
678 /* All the rest of memory is free - add it to the free
679 * regions so that it can be allocated by pmap_steal
680 */
681 free_regions[free_regions_count].start = *first_avail;
682 free_regions[free_regions_count].end = pmap_mem_regions[0].end;
683
684 avail_remaining += (free_regions[free_regions_count].end -
685 free_regions[free_regions_count].start) /
686 PPC_PGBYTES;
687
688#if DEBUG
689 kprintf("ADDED FREE REGION from 0x%08x to 0x%08x, avail_remaining = %d\n",
690 free_regions[free_regions_count].start,free_regions[free_regions_count].end,
691 avail_remaining);
692#endif /* DEBUG */
693
694 free_regions_count++;
695
696 current_free_region = 0;
697
698 avail_next = free_regions[current_free_region].start;
699
700#if DEBUG
701 kprintf("Number of free regions=%d\n",free_regions_count); /* (TEST/DEBUG) */
702 kprintf("Current free region=%d\n",current_free_region); /* (TEST/DEBUG) */
703 for(i=0;i<free_regions_count; i++) { /* (TEST/DEBUG) */
704 kprintf("Free region %3d - from %08X to %08X\n", i, free_regions[i].start,
705 free_regions[i].end); /* (TEST/DEBUG) */
706 }
707 for (i = 0; i < pmap_mem_regions_count; i++) { /* (TEST/DEBUG) */
708 kprintf("PMAP region %3d - from %08X to %08X; phys=%08X\n", i, /* (TEST/DEBUG) */
709 pmap_mem_regions[i].start, /* (TEST/DEBUG) */
710 pmap_mem_regions[i].end, /* (TEST/DEBUG) */
711 pmap_mem_regions[i].phys_table); /* (TEST/DEBUG) */
712 }
713#endif
714
715}
716
717/*
718 * pmap_init(spa, epa)
719 * finishes the initialization of the pmap module.
720 * This procedure is called from vm_mem_init() in vm/vm_init.c
721 * to initialize any remaining data structures that the pmap module
722 * needs to map virtual memory (VM is already ON).
723 *
724 * Note that the pmap needs to be sized and aligned to
725 * a power of two. This is because it is accessed in both virtual and
726 * real mode, so it can't span a page boundary.
727 */
728
729void
730pmap_init(void)
731{
732
733
734 pmap_zone = zinit(pmapSize, 400 * pmapSize, 4096, "pmap");
735#if ZONE_DEBUG
736 zone_debug_disable(pmap_zone); /* Can't debug this one 'cause it messes with size and alignment */
737#endif /* ZONE_DEBUG */
738
739 pmap_initialized = TRUE;
740
741 /*
742 * Initialize list of freed up pmaps
743 */
744 free_pmap_list = 0; /* Set that there are no free pmaps */
745 free_pmap_count = 0;
746 simple_lock_init(&free_pmap_lock, ETAP_VM_PMAP_CACHE);
747}
748
749unsigned int pmap_free_pages(void)
750{
751 return avail_remaining;
752}
753
754boolean_t pmap_next_page(vm_offset_t *addrp)
755{
756 /* Non-optimal, but only used for virtual memory startup.
757 * Allocate memory from a table of free physical addresses.
758 * If there are no more free entries, too bad. We have two
759 * tables to look through, free_regions[] which holds free
760 * regions from inside pmap_mem_regions[0], and the others...
761 * pmap_mem_regions[1..]
762 */
763
764 /* current_free_region indicates the next free entry,
765 * if it's less than free_regions_count, then we're still
766 * in free_regions, otherwise we're in pmap_mem_regions
767 */
768
769 if (current_free_region >= free_regions_count) {
770 /* We're into the pmap_mem_regions, handle this
771 * separately to free_regions
772 */
773
774 int current_pmap_mem_region = current_free_region -
775 free_regions_count + 1;
776 if (current_pmap_mem_region > pmap_mem_regions_count)
777 return FALSE;
778 *addrp = avail_next;
779 avail_next += PAGE_SIZE;
780 avail_remaining--;
781 if (avail_next >= pmap_mem_regions[current_pmap_mem_region].end) {
782 current_free_region++;
783 current_pmap_mem_region++;
784 avail_next = pmap_mem_regions[current_pmap_mem_region].start;
785#if DEBUG
786 kprintf("pmap_next_page : next region start=0x%08x\n",avail_next);
787#endif /* DEBUG */
788 }
789 return TRUE;
790 }
791
792 /* We're in the free_regions, allocate next page and increment
793 * counters
794 */
795 *addrp = avail_next;
796
797 avail_next += PAGE_SIZE;
798 avail_remaining--;
799
800 if (avail_next >= free_regions[current_free_region].end) {
801 current_free_region++;
802 if (current_free_region < free_regions_count)
803 avail_next = free_regions[current_free_region].start;
804 else
805 avail_next = pmap_mem_regions[current_free_region -
806 free_regions_count + 1].start;
807#if DEBUG
808 kprintf("pmap_next_page : next region start=0x%08x\n",avail_next);
809#endif
810 }
811 return TRUE;
812}
813
814void pmap_virtual_space(
815 vm_offset_t *startp,
816 vm_offset_t *endp)
817{
818 *startp = round_page(first_free_virt);
819 *endp = VM_MAX_KERNEL_ADDRESS;
820}
821
822/*
823 * pmap_create
824 *
825 * Create and return a physical map.
826 *
827 * If the size specified for the map is zero, the map is an actual physical
828 * map, and may be referenced by the hardware.
829 *
830 * A pmap is either in the free list or in the in-use list. The only use
831 * of the in-use list (aside from debugging) is to handle the VSID wrap situation.
832 * Whenever a new pmap is allocated (i.e., not recovered from the free list), the
833 * in-use list is searched until a hole in the VSID sequence is found. (Note
834 * that the in-use pmaps are queued in VSID sequence order.) This is all done
835 * while free_pmap_lock is held.
836 *
837 * If the size specified is non-zero, the map will be used in software
838 * only, and is bounded by that size.
839 */
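/*
 * Editorial example of the VSID hole search described above (not part of the
 * original source): if the in-use pmaps, in spaceNum order, are ... 5, 6, 7, 9 ...
 * and cursor_pmap currently sits at 7, the scan in pmap_create() tries 8, sees that
 * the next in-use pmap is numbered 9, and claims spaceNum 8; the VSID actually used
 * is then (8 * incrVSID) & SID_MAX.
 */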
840pmap_t
841pmap_create(vm_size_t size)
842{
843 pmap_t pmap, ckpmap, fore, aft;
844 int s, i;
845 space_t sid;
846 unsigned int currSID;
847
848#if PMAP_LOWTRACE
849 dbgTrace(0xF1D00001, size, 0); /* (TEST/DEBUG) */
850#endif
851
852#if DEBUG
853 if (pmdebug & PDB_USER)
854 kprintf("pmap_create(size=%x)%c", size, size ? '\n' : ' ');
855#endif
856
857 /*
858 * A software use-only map doesn't even need a pmap structure.
859 */
860 if (size)
861 return(PMAP_NULL);
862
863 /*
864 * If there is a pmap in the pmap free list, reuse it.
865 * Note that we use free_pmap_list for all chaining of pmaps, both to
866 * the free list and the in use chain (anchored from kernel_pmap).
867 */
868 s = splhigh();
869 simple_lock(&free_pmap_lock);
870
871 if(free_pmap_list) { /* Any free? */
872 pmap = free_pmap_list; /* Yes, allocate it */
873 free_pmap_list = (pmap_t)pmap->bmaps; /* Dequeue this one (we chain free ones through bmaps) */
874 free_pmap_count--;
875 }
876 else {
877 simple_unlock(&free_pmap_lock); /* Unlock just in case */
878 splx(s);
879
880 pmap = (pmap_t) zalloc(pmap_zone); /* Get one */
881 if (pmap == PMAP_NULL) return(PMAP_NULL); /* Handle out-of-memory condition */
882
883 bzero((char *)pmap, pmapSize); /* Clean up the pmap */
884
885 s = splhigh();
886 simple_lock(&free_pmap_lock); /* Lock it back up */
887
888 ckpmap = cursor_pmap; /* Get starting point for free ID search */
889 currSID = ckpmap->spaceNum; /* Get the actual space ID number */
890
891 while(1) { /* Keep trying until something happens */
892
893 currSID = (currSID + 1) & SID_MAX; /* Get the next in the sequence */
894 ckpmap = (pmap_t)ckpmap->pmap_link.next; /* On to the next in-use pmap */
895
896 if(ckpmap->spaceNum != currSID) break; /* If we are out of sequence, this is free */
897
898 if(ckpmap == cursor_pmap) { /* See if we have 2^20 already allocated */
899 panic("pmap_create: Maximum number (2^20) active address spaces reached\n"); /* Die pig dog */
900 }
901 }
902
903 pmap->space = (currSID * incrVSID) & SID_MAX; /* Calculate the actual VSID */
904 pmap->spaceNum = currSID; /* Set the space ID number */
905
906/*
907 * Now we link into the chain just before the out of sequence guy.
908 */
909
910 fore = (pmap_t)ckpmap->pmap_link.prev; /* Get the current's previous */
911 pmap->pmap_link.next = (queue_t)ckpmap; /* My next points to the current */
912 fore->pmap_link.next = (queue_t)pmap; /* Current's previous's next points to me */
913 pmap->pmap_link.prev = (queue_t)fore; /* My prev points to what the current pointed to */
914 ckpmap->pmap_link.prev = (queue_t)pmap; /* Current's prev points to me */
915
916 simple_lock_init(&pmap->lock, ETAP_VM_PMAP);
917 pmap->pmapvr = (unsigned int)pmap ^ (unsigned int)pmap_extract(kernel_pmap, (vm_offset_t)pmap); /* Get physical pointer to the pmap and make mask */
918 }
919 pmap->ref_count = 1;
920 pmap->stats.resident_count = 0;
921 pmap->stats.wired_count = 0;
922 pmap->bmaps = 0; /* Clear block map pointer to 0 */
923 pmap->vflags = 0; /* Mark all alternates invalid for now */
924 for(i=0; i < 128; i++) { /* Clean out usage slots */
925 pmap->pmapUsage[i] = 0;
926 }
927 for(i=0; i < 16; i++) { /* Initialize for laughs */
928 pmap->pmapSegs[i] = SEG_REG_PROT | (i << 20) | pmap->space;
929 }
930
931#if PMAP_LOWTRACE
932 dbgTrace(0xF1D00002, (unsigned int)pmap, (unsigned int)pmap->space); /* (TEST/DEBUG) */
933#endif
934
935#if DEBUG
936 if (pmdebug & PDB_USER)
937 kprintf("-> %x, space id = %d\n", pmap, pmap->space);
938#endif
939
940 simple_unlock(&free_pmap_lock);
941 splx(s);
942 return(pmap);
943}
944
945/*
946 * pmap_destroy
947 *
948 * Gives up a reference to the specified pmap. When the reference count
949 * reaches zero the pmap structure is added to the pmap free list.
950 *
951 * Should only be called if the map contains no valid mappings.
952 */
953void
954pmap_destroy(pmap_t pmap)
955{
956 int ref_count;
957 spl_t s;
958 pmap_t fore, aft;
959
960#if PMAP_LOWTRACE
961 dbgTrace(0xF1D00003, (unsigned int)pmap, 0); /* (TEST/DEBUG) */
962#endif
963
964#if DEBUG
965 if (pmdebug & PDB_USER)
966 kprintf("pmap_destroy(pmap=%x)\n", pmap);
967#endif
968
969 if (pmap == PMAP_NULL)
970 return;
971
972 ref_count=hw_atomic_sub(&pmap->ref_count, 1); /* Back off the count */
973 if(ref_count>0) return; /* Still more users, leave now... */
974
975 if(ref_count < 0) /* Did we go too far? */
976 panic("pmap_destroy(): ref_count < 0");
977
978#ifdef notdef
979 if(pmap->stats.resident_count != 0)
980 panic("PMAP_DESTROY: pmap not empty");
981#else
982 if(pmap->stats.resident_count != 0) {
983 pmap_remove(pmap, 0, 0xFFFFF000);
984 }
985#endif
986
987 /*
988 * Add the pmap to the pmap free list.
989 */
990
991 s = splhigh();
992 /*
993 * Add the pmap to the pmap free list.
994 */
995 simple_lock(&free_pmap_lock);
996
997 if (free_pmap_count <= free_pmap_max) { /* Do we have enough spares? */
998
999 pmap->bmaps = (struct blokmap *)free_pmap_list; /* Queue in front */
1000 free_pmap_list = pmap;
1001 free_pmap_count++;
1002 simple_unlock(&free_pmap_lock);
1003
1004 } else {
1005 if(cursor_pmap == pmap) cursor_pmap = (pmap_t)pmap->pmap_link.prev; /* If we are releasing the cursor, back up */
1006 fore = (pmap_t)pmap->pmap_link.prev;
1007 aft = (pmap_t)pmap->pmap_link.next;
1008 fore->pmap_link.next = pmap->pmap_link.next; /* My previous's next is my next */
1009 aft->pmap_link.prev = pmap->pmap_link.prev; /* My next's previous is my previous */
1010 simple_unlock(&free_pmap_lock);
1011 zfree(pmap_zone, (vm_offset_t) pmap);
1012 }
1013 splx(s);
1014}
1015
1016/*
1017 * pmap_reference(pmap)
1018 * gains a reference to the specified pmap.
1019 */
1020void
1021pmap_reference(pmap_t pmap)
1022{
1023 spl_t s;
1024
1025#if PMAP_LOWTRACE
1026 dbgTrace(0xF1D00004, (unsigned int)pmap, 0); /* (TEST/DEBUG) */
1027#endif
1028
1029#if DEBUG
1030 if (pmdebug & PDB_USER)
1031 kprintf("pmap_reference(pmap=%x)\n", pmap);
1032#endif
1033
1034 if (pmap != PMAP_NULL) hw_atomic_add(&pmap->ref_count, 1); /* Bump the count */
1035}
1036
1037/*
1038 * pmap_remove_some_phys
1039 *
1040 * Removes mappings of the associated page from the specified pmap
1041 *
1042 */
1043void pmap_remove_some_phys(
1044 pmap_t pmap,
1045 vm_offset_t pa)
1046{
1047 register struct phys_entry *pp;
1048 register struct mapping *mp, *mpv;
1049
1050
1051 if (pmap == PMAP_NULL) return; /* Do nothing if no pmap */
1052
1053 pp = pmap_find_physentry(pa); /* Get the physent for this page */
1054 if (pp == PHYS_NULL) return; /* Leave if not in physical RAM */
1055
1056 if (pmap->vflags & pmapVMhost)
1057 mapping_purge(pp);
1058 else
1059 mapping_purge_pmap(pp, pmap);
1060
1061 return; /* Leave... */
1062}
1063
1064/*
1065 * pmap_remove(pmap, s, e)
1066 * unmaps all virtual addresses v in the virtual address
1067 * range determined by [s, e) and pmap.
1068 * s and e must be on machine independent page boundaries and
1069 * s must be less than or equal to e.
1070 *
1071 * Note that pmap_remove does not remove any mappings in nested pmaps. We just
1072 * skip those segments.
1073 */
1074void
1075pmap_remove(
1076 pmap_t pmap,
1077 vm_offset_t sva,
1078 vm_offset_t eva)
1079{
1080 spl_t spl;
1081 struct mapping *mp, *blm;
1082 vm_offset_t lpage;
1083
1084#if PMAP_LOWTRACE
1085 dbgTrace(0xF1D00005, (unsigned int)pmap, sva|((eva-sva)>>12)); /* (TEST/DEBUG) */
1086#endif
1087
1088#if DEBUG
1089 if (pmdebug & PDB_USER)
1090 kprintf("pmap_remove(pmap=%x, sva=%x, eva=%x)\n",
1091 pmap, sva, eva);
1092#endif
1093
1094 if (pmap == PMAP_NULL)
1095 return;
1096
1097 /* It is just possible that eva might have wrapped around to zero,
1098 * and sometimes we get asked to liberate something of size zero
1099 * even though it's dumb (eg. after zero length read_overwrites)
1100 */
1101 assert(eva >= sva);
1102
1103 /* If these are not page aligned the loop might not terminate */
1104 assert((sva == trunc_page(sva)) && (eva == trunc_page(eva)));
1105
1106 /* We liberate addresses from high to low, since the stack grows
1107 * down. This means that we won't need to test addresses below
1108 * the limit of stack growth
1109 */
1110
1111 debugLog2(44, sva, eva); /* Log pmap_remove call */
1112
1113 sva = trunc_page(sva); /* Make it clean */
1114 lpage = trunc_page(eva) - PAGE_SIZE; /* Point to the last page contained in the range */
1115
1116/*
1117 * Here we will remove all of the block mappings that overlap this range.
1118 * hw_rem_blk removes one mapping in the range and returns. If it returns
1119 * 0, there are no blocks in the range.
1120 */
1121
1122 while(mp = (mapping *)hw_rem_blk(pmap, sva, lpage)) { /* Keep going until no more */
1123 if((unsigned int)mp & 1) { /* Make sure we don't unmap a permanent one */
1124 blm = (struct mapping *)hw_cpv((mapping *)((unsigned int)mp & 0xFFFFFFFC)); /* Get virtual address */
1125 panic("mapping_remove: attempt to unmap a permanent mapping - pmap = %08X, va = %08X, mapping = %08X\n",
1126 pmap, sva, blm);
1127 }
1128 if (!((unsigned int)mp & 2))
1129 mapping_free(hw_cpv(mp)); /* Release it */
1130 }
1131 while (pmap->stats.resident_count && (eva > sva)) {
1132
1133 eva -= PAGE_SIZE; /* Back up a page */
1134
1135#if 1
1136 if((0x00008000 >> (sva >> 28)) & pmap->vflags)
1137 panic("pmap_remove: attempt to remove nested vaddr; pmap = %08X, vaddr = %08X\n", pmap, sva); /* (TEST/DEBUG) panic */
1138#endif
1139 if(!(pmap->pmapUsage[(eva >> pmapUsageShft) & pmapUsageMask])) { /* See if this chunk has anything in it */
1140 eva = eva & (-pmapUsageSize); /* Back up into the previous slot */
1141 continue; /* Check the next... */
1142 }
1143 mapping_remove(pmap, eva); /* Remove the mapping for this address */
1144 }
1145
1146 debugLog2(45, 0, 0); /* Log pmap_remove exit */
1147}
1148
1149/*
1150 * Routine:
1151 * pmap_page_protect
1152 *
1153 * Function:
1154 * Lower the permission for all mappings to a given page.
1155 */
1156void
1157pmap_page_protect(
1158 vm_offset_t pa,
1159 vm_prot_t prot)
1160{
1161 register struct phys_entry *pp;
1162 boolean_t remove;
1163
1164
1165#if PMAP_LOWTRACE
1166 dbgTrace(0xF1D00006, (unsigned int)pa, (unsigned int)prot); /* (TEST/DEBUG) */
1167#endif
1168
1169#if DEBUG
1170 if (pmdebug & PDB_USER)
1171 kprintf("pmap_page_protect(pa=%x, prot=%x)\n", pa, prot);
1172#endif
1173
1174 debugLog2(46, pa, prot); /* Log pmap_page_protect call */
1175
1176 switch (prot) {
1177 case VM_PROT_READ:
1178 case VM_PROT_READ|VM_PROT_EXECUTE:
1179 remove = FALSE;
1180 break;
1181 case VM_PROT_ALL:
1182 return;
1183 default:
1184 remove = TRUE;
1185 break;
1186 }
1187
1188 pp = pmap_find_physentry(pa); /* Get the physent for this page */
1189 if (pp == PHYS_NULL) return; /* Leave if not in physical RAM */
1190
1191 if (remove) { /* If the protection was set to none, we'll remove all mappings */
1192 mapping_purge(pp); /* Get rid of them all */
1193
1194 debugLog2(47, 0, 0); /* Log pmap_page_protect exit */
1195 return; /* Leave... */
1196 }
1197
1198 /* When we get here, it means that we are to change the protection for a
1199 * physical page.
1200 */
1201
1202 mapping_protect_phys(pp, prot, 0); /* Change protection of all mappings to page. */
1203
1204 debugLog2(47, 1, 0); /* Log pmap_page_protect exit */
1205}
1206
1207/*
1208 * pmap_protect(pmap, s, e, prot)
1209 * changes the protection on all virtual addresses v in the
1210 * virtual address range determined by [s, e] and pmap to prot.
1211 * s and e must be on machine independent page boundaries and
1212 * s must be less than or equal to e.
1213 *
1214 * Note that any requests to change the protection of a nested pmap are
1215 * ignored. Those changes MUST be done by calling this with the correct pmap.
1216 */
1217void pmap_protect(
1218 pmap_t pmap,
1219 vm_offset_t sva,
1220 vm_offset_t eva,
1221 vm_prot_t prot)
1222{
1223 spl_t spl;
1224 register struct phys_entry *pp;
1225 register struct mapping *mp, *mpv;
1226
1227#if PMAP_LOWTRACE
1228 dbgTrace(0xF1D00008, (unsigned int)pmap, (unsigned int)(sva|((eva-sva)>>12))); /* (TEST/DEBUG) */
1229#endif
1230
1231#if DEBUG
1232 if (pmdebug & PDB_USER)
1233 kprintf("pmap_protect(pmap=%x, sva=%x, eva=%x, prot=%x)\n", pmap, sva, eva, prot);
1234
1235 assert(sva < eva);
1236#endif
1237
1238 if (pmap == PMAP_NULL) return; /* Do nothing if no pmap */
1239
1240 debugLog2(48, sva, eva); /* Log pmap_protect call */
1241
1c79356b 1242 if (prot == VM_PROT_NONE) { /* Should we kill the address range?? */
1243 pmap_remove(pmap, sva, eva); /* Yeah, dump 'em */
1244
1245 debugLog2(49, prot, 0); /* Log pmap_protect exit */
1246
1247 return; /* Leave... */
1248 }
1249
1250 sva = trunc_page(sva); /* Start up a page boundary */
1251
1252 while(sva < eva) { /* Step through */
1253
1254 if(!(pmap->pmapUsage[(sva >> pmapUsageShft) & pmapUsageMask])) { /* See if this chunk has anything in it */
1255 sva = (sva + pmapUsageSize) &(-pmapUsageSize); /* Jump up into the next slot if nothing here */
1256 if(!sva) break; /* We tried to wrap, kill loop... */
1257 continue; /* Check the next... */
1258 }
1259
1260#if 1
1261 if((0x00008000 >> (sva >> 28)) & pmap->vflags)
1262 panic("pmap_protect: attempt to protect nested vaddr; pmap = %08X, vaddr = %08X\n", pmap, sva); /* (TEST/DEBUG) panic */
1263#endif
1264
1265 mapping_protect(pmap, sva, prot); /* Change the protection on the page */
1266 sva += PAGE_SIZE; /* On to the next page */
1267 }
1268
1269 debugLog2(49, prot, 1); /* Log pmap_protect exit */
1270 return; /* Leave... */
1271}
1272
1273
1274
1275/*
1276 * pmap_enter
1277 *
1278 * Create a translation for the virtual address (virt) to the physical
1279 * address (phys) in the pmap with the protection requested. If the
1280 * translation is wired then we can not allow a full page fault, i.e.,
1281 * the mapping control block is not eligible to be stolen in a low memory
1282 * condition.
1283 *
1284 * NB: This is the only routine which MAY NOT lazy-evaluate
1285 * or lose information. That is, this routine must actually
1286 * insert this page into the given map NOW.
1287 */
1288void
1289pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
1290 unsigned int flags, boolean_t wired)
1291{
1292 spl_t spl;
1293 struct mapping *mp;
1294 struct phys_entry *pp;
1295 int memattr;
1296
de355530
A
1297#if PMAP_LOWTRACE
1298 dbgTrace(0xF1D00009, (unsigned int)pmap, (unsigned int)va); /* (TEST/DEBUG) */
1299 dbgTrace(0xF1D04009, (unsigned int)pa, (unsigned int)prot); /* (TEST/DEBUG) */
1300#endif
1301
1302 if (pmap == PMAP_NULL) return; /* If they gave us no pmap, just leave... */
1303
1304 debugLog2(50, va, pa); /* Log pmap_enter call */
1305
1306 pp = pmap_find_physentry(pa); /* Get the physent for this physical page */
1307
1308 if((0x00008000 >> (va >> 28)) & pmap->vflags)
1309 panic("pmap_enter: attempt to map into nested vaddr; pmap = %08X, vaddr = %08X\n", pmap, va); /* (TEST/DEBUG) panic */
1310
1311 spl=splhigh(); /* Have to disallow interrupts between the
1312 time we possibly clear a mapping and the time
1313 we get it remapped again. An I/O SLIH could
1314 try to drive an IOR using the page before
1315 we get it mapped (Dude! This was a tough
1316 bug!!!!) */
1317
1318 mapping_remove(pmap, va); /* Remove any other mapping at this address */
1319
1320 if(flags & VM_WIMG_USE_DEFAULT) {
1321 if(pp) {
1322 /* Set attr to the phys default */
1323 memattr = ((pp->pte1&0x00000078) >> 3);
1324 } else {
1325 memattr = PTE_WIMG_UNCACHED_COHERENT_GUARDED;
1326 }
1327 } else {
1328 memattr = flags & VM_WIMG_MASK;
1329 }
1330
1331
1332 /* Make the address mapping */
1333 mp=mapping_make(pmap, pp, va, pa, prot, memattr, 0);
1334
1335 splx(spl); /* I'm not busy no more - come what may */
1336
1337 debugLog2(51, prot, 0); /* Log pmap_enter exit */
1338
1339#if DEBUG
1340 if (pmdebug & (PDB_USER|PDB_ENTER))
1341 kprintf("leaving pmap_enter\n");
1342#endif
1343
1344}
1345
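/*
 * Editorial example (not part of the original source): wire one page into the
 * kernel map with the page's default caching attributes; va and pa are
 * placeholders.
 */
#if 0	/* illustrative only */
	pmap_enter(kernel_pmap, va, pa, VM_PROT_READ | VM_PROT_WRITE,
		   VM_WIMG_USE_DEFAULT, TRUE);		/* wired: mapping block may not be stolen */
#endif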
1346/*
1347 * pmap_extract(pmap, va)
1348 * returns the physical address corresponding to the
1349 * virtual address specified by pmap and va if the
1350 * virtual address is mapped and 0 if it is not.
1351 */
1352vm_offset_t pmap_extract(pmap_t pmap, vm_offset_t va) {
1353
1354 spl_t spl;
1355 register struct mapping *mp, *mpv;
1356 register vm_offset_t pa;
1357 unsigned int seg;
1358 pmap_t actpmap;
1359
1360
1361#if PMAP_LOWTRACE
1362 dbgTrace(0xF1D0000B, (unsigned int)pmap, (unsigned int)va); /* (TEST/DEBUG) */
1363#endif
1364#if DEBUG
1365 if (pmdebug & PDB_USER)
1366 kprintf("pmap_extract(pmap=%x, va=%x)\n", pmap, va);
1367#endif
1368
1369 seg = va >> 28; /* Isolate segment */
1370 if((0x00008000 >> seg) & pmap->vflags) actpmap = pmap->pmapPmaps[seg]; /* Set nested pmap if there is one */
1371 else actpmap = pmap; /* Otherwise use the one passed in */
1372
1373 pa = (vm_offset_t) 0; /* Clear this to 0 */
1374
1375 debugLog2(52, actpmap->space, va); /* Log pmap_extract call */
1376
1377 spl = splhigh(); /* We can't allow any loss of control here */
1378
1379 if(mp=hw_lock_phys_vir(actpmap->space, va)) { /* Find the mapping for this vaddr and lock physent */
1380 if((unsigned int)mp&1) { /* Did the lock on the phys entry time out? */
1381 panic("pmap_extract: timeout obtaining lock on physical entry\n"); /* Scream bloody murder! */
1382 splx(spl); /* Interruptions are cool now */
1383 return 0;
1384 }
1385
1386 mpv = hw_cpv(mp); /* Get virtual address of mapping */
1387 pa = (vm_offset_t)((mpv->PTEr & -PAGE_SIZE) | ((unsigned int)va & (PAGE_SIZE-1))); /* Build the physical address */
1388 if(mpv->physent) hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the physical entry */
1389 splx(spl); /* Interruptions are cool now */
1390
1391 debugLog2(53, pa, 0); /* Log pmap_extract exit */
1392
1393 return pa; /* Return the physical address... */
1394 }
1395
1396 pa = hw_cvp_blk(pmap, va); /* Try to convert odd-sized page (returns 0 if not found) */
1397 /* Note no nested pmaps here */
1398 splx(spl); /* Restore 'rupts */
1399 debugLog2(53, pa, 0); /* Log pmap_extract exit */
1400 return pa; /* Return physical address or 0 */
1401}
1402
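/*
 * Editorial example (not part of the original source), in the spirit of the V=R
 * verification loop in pmap_bootstrap(): use pmap_extract() to confirm a virtual
 * address is mapped where we expect; va and pa are placeholders.
 */
#if 0	/* illustrative only */
	if (pmap_extract(kernel_pmap, va) != pa)
		panic("va %08X is not mapped to pa %08X\n", va, pa);
#endif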
1403/*
1404 * pmap_attribute_cache_sync
1405 * Handle the machine attribute calls which involve syncing the processor
1406 * cache.
1407 */
1408kern_return_t
1409pmap_attribute_cache_sync(address, size, attribute, value)
1410 vm_offset_t address;
1411 vm_size_t size;
1412 vm_machine_attribute_t attribute;
1413 vm_machine_attribute_val_t* value;
1414{
1415 while(size) {
1416 switch (*value) { /* What type was that again? */
1417 case MATTR_VAL_CACHE_SYNC: /* It is sync I+D caches */
1418 sync_cache(address, PAGE_SIZE); /* Sync up dem caches */
1419 break; /* Done with this one here... */
1420
1421 case MATTR_VAL_CACHE_FLUSH: /* It is flush from all caches */
1422 flush_dcache(address, PAGE_SIZE, TRUE); /* Flush out the data cache */
1423 invalidate_icache(address,
1424 PAGE_SIZE, TRUE); /* Flush out the instruction cache */
1425 break; /* Done with this one here... */
1426
1427 case MATTR_VAL_DCACHE_FLUSH: /* It is flush from data cache(s) */
1428 flush_dcache(address, PAGE_SIZE, TRUE); /* Flush out the data cache */
1429 break; /* Done with this one here... */
1430
1431 case MATTR_VAL_ICACHE_FLUSH: /* It is flush from instr cache(s) */
1432 invalidate_icache(address,
1433 PAGE_SIZE, TRUE); /* Flush out the instruction cache */
1434 break; /* Done with this one here... */
1435 }
1436 size -= PAGE_SIZE;
1437 }
1438 return KERN_SUCCESS;
1439}
1440
1441
1442/*
1443 * pmap_attributes:
1444 *
1445 * Set/Get special memory attributes; Set is not implemented.
1446 *
1447 * Note: 'VAL_GET_INFO' is used to return info about a page.
1448 * If less than 1 page is specified, return the physical page
1449 * mapping and a count of the number of mappings to that page.
1450 * If more than one page is specified, return the number
1451 * of resident pages and the number of shared (more than
1452 * one mapping) pages in the range;
1453 *
1454 */
1455kern_return_t
1456pmap_attribute(pmap, address, size, attribute, value)
1457 pmap_t pmap;
1458 vm_offset_t address;
1459 vm_size_t size;
1460 vm_machine_attribute_t attribute;
1461 vm_machine_attribute_val_t* value;
1462{
1463 spl_t s;
1464 vm_offset_t sva, eva;
1465 vm_offset_t pa;
1466 kern_return_t ret;
1467 register struct mapping *mp, *mpv;
1468 register struct phys_entry *pp;
1469 int total, seg;
1470 pmap_t actpmap;
1471
1472 if (attribute != MATTR_CACHE)
1473 return KERN_INVALID_ARGUMENT;
1474
1475 /* We can't get the caching attribute for more than one page
1476 * at a time
1477 */
1478 if ((*value == MATTR_VAL_GET) &&
1479 (trunc_page(address) != trunc_page(address+size-1)))
1480 return KERN_INVALID_ARGUMENT;
1481
1482 if (pmap == PMAP_NULL)
1483 return KERN_SUCCESS;
1484
1485 sva = trunc_page(address);
1486 eva = round_page(address + size);
1487 ret = KERN_SUCCESS;
1488
1489 debugLog2(54, address, attribute); /* Log pmap_attribute call */
1490
1491 switch (*value) {
1492 case MATTR_VAL_CACHE_SYNC: /* sync I+D caches */
1493 case MATTR_VAL_CACHE_FLUSH: /* flush from all caches */
1494 case MATTR_VAL_DCACHE_FLUSH: /* flush from data cache(s) */
1495 case MATTR_VAL_ICACHE_FLUSH: /* flush from instr cache(s) */
1496 sva = trunc_page(sva);
1497 s = splhigh();
1498
1499 while (sva < eva) {
1500 seg = sva >> 28; /* Isolate segment */
1501 if((0x00008000 >> seg) & pmap->vflags) actpmap = pmap->pmapPmaps[seg]; /* Set nested pmap if there is one */
1502 else actpmap = pmap; /* Otherwise use the one passed in */
1503
1504/*
1505 * Note: the following should work ok with nested pmaps because there are no overlaid mappings
1506 */
1507 if(!(actpmap->pmapUsage[(sva >> pmapUsageShft) & pmapUsageMask])) { /* See if this chunk has anything in it */
1508 sva = (sva + pmapUsageSize) & (-pmapUsageSize); /* Jump up into the next slot if nothing here */
1509 if(!sva) break; /* We tried to wrap, kill loop... */
1510 continue; /* Check the next... */
1511 }
1512
1513 if(!(mp = hw_lock_phys_vir(actpmap->space, sva))) { /* Find the mapping for this vaddr and lock physent */
1514 sva += PAGE_SIZE; /* Point to the next page */
1515 continue; /* Skip if the page is not mapped... */
1516 }
1517
1518 if((unsigned int)mp&1) { /* Did the lock on the phys entry time out? */
1519 panic("pmap_attribute: timeout obtaining lock on physical entry\n"); /* Scream bloody murder! */
1520 continue;
1521 }
1522
1523 mpv = hw_cpv(mp); /* Get virtual address of mapping */
1524 if((unsigned int)mpv->physent) { /* Is there a physical entry? */
1525 pa = (vm_offset_t)mpv->physent->pte1 & -PAGE_SIZE; /* Yes, get the physical address from there */
1526 }
1527 else {
1528 pa = (vm_offset_t)(mpv->PTEr & PAGE_SIZE); /* Otherwise from the mapping */
1529 }
1530
1531 switch (*value) { /* What type was that again? */
1532 case MATTR_VAL_CACHE_SYNC: /* It is sync I+D caches */
1533 sync_cache(pa, PAGE_SIZE); /* Sync up dem caches */
1534 break; /* Done with this one here... */
1535
1536 case MATTR_VAL_CACHE_FLUSH: /* It is flush from all caches */
1537 flush_dcache(pa, PAGE_SIZE, TRUE); /* Flush out the data cache */
1538 invalidate_icache(pa, PAGE_SIZE, TRUE); /* Flush out the instruction cache */
1539 break; /* Done with this one here... */
1540
1541 case MATTR_VAL_DCACHE_FLUSH: /* It is flush from data cache(s) */
1542 flush_dcache(pa, PAGE_SIZE, TRUE); /* Flush out the data cache */
1543 break; /* Done with this one here... */
1544
1545 case MATTR_VAL_ICACHE_FLUSH: /* It is flush from instr cache(s) */
1546 invalidate_icache(pa, PAGE_SIZE, TRUE); /* Flush out the instruction cache */
1547 break; /* Done with this one here... */
1548 }
1549 if(mpv->physent) hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Unlock the physical entry if it exists*/
1550
1551 sva += PAGE_SIZE; /* Point to the next page */
1552 }
1553 splx(s);
1554 break;
1555
1556 case MATTR_VAL_GET_INFO: /* Get info */
1557 total = 0;
1558 s = splhigh(); /* Lock 'em out */
1559
1560 if (size <= PAGE_SIZE) { /* Do they want just one page */
1561 seg = sva >> 28; /* Isolate segment */
1562 if((0x00008000 >> seg) & pmap->vflags) actpmap = pmap->pmapPmaps[seg]; /* Set nested pmap if there is one */
1563 else actpmap = pmap; /* Otherwise use the one passed in */
1564 if(!(mp = hw_lock_phys_vir(actpmap->space, sva))) { /* Find the mapping for this vaddr and lock physent */
1565 *value = 0; /* Return nothing if no mapping */
1566 }
1567 else {
1568 if((unsigned int)mp&1) { /* Did the lock on the phys entry time out? */
1569 panic("pmap_attribute: timeout obtaining lock on physical entry\n"); /* Scream bloody murder! */
1570 }
1571 mpv = hw_cpv(mp); /* Get virtual address of mapping */
1572 if(pp = mpv->physent) { /* Check for a physical entry */
1573 total = 0; /* Clear the count */
1574					for (mpv = (mapping *)hw_cpv((mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS)); mpv != NULL; mpv = hw_cpv(mpv->next)) total++;	/* Count the mappings */
1575 *value = (vm_machine_attribute_val_t) ((pp->pte1 & -PAGE_SIZE) | total); /* Pass back the physical address and the count of mappings */
1576 hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* Clear the physical entry lock */
1577 }
1578 else { /* This is the case for an I/O mapped area */
1579 *value = (vm_machine_attribute_val_t) ((mpv->PTEr & -PAGE_SIZE) | 1); /* Pass back the physical address and the count of mappings */
1580 }
1581 }
1582 }
1583 else {
1584 total = 0;
1585 while (sva < eva) {
1586 seg = sva >> 28; /* Isolate segment */
1587 if((0x00008000 >> seg) & pmap->vflags) actpmap = pmap->pmapPmaps[seg]; /* Set nested pmap if there is one */
1588 else actpmap = pmap; /* Otherwise use the one passed in */
1589
1590 if(!(actpmap->pmapUsage[(sva >> pmapUsageShft) & pmapUsageMask])) { /* See if this chunk has anything in it */
1591 sva = (sva + pmapUsageSize) & (-pmapUsageSize); /* Jump up into the next slot if nothing here */
1592 if(!sva) break; /* We tried to wrap, kill loop... */
1593 continue; /* Check the next... */
1594 }
1595 if(mp = hw_lock_phys_vir(actpmap->space, sva)) { /* Find the mapping for this vaddr and lock physent */
1596 if((unsigned int)mp&1) { /* Did the lock on the phys entry time out? */
1597 panic("pmap_attribute: timeout obtaining lock on physical entry\n"); /* Scream bloody murder! */
1598 continue;
1599 }
1600 mpv = hw_cpv(mp); /* Get virtual address of mapping */
1601 total += 65536 + (mpv->physent && ((mapping *)((unsigned int)mpv->physent->phys_link & -32))->next); /* Count the "resident" and shared pages */
1602 hw_unlock_bit((unsigned int *)&mpv->physent->phys_link, PHYS_LOCK); /* Clear the physical entry lock */
1603 }
1604 sva += PAGE_SIZE;
1605 }
1606 *value = total;
1607 }
1608 splx(s);
1609 break;
1610
1611 case MATTR_VAL_GET: /* return current value */
1612 case MATTR_VAL_OFF: /* turn attribute off */
1613 case MATTR_VAL_ON: /* turn attribute on */
1614 default:
1615 ret = KERN_INVALID_ARGUMENT;
1616 break;
1617	}
1618
1619	debugLog2(55, 0, 0);						/* Log pmap_attribute exit */
1620
1621 return ret;
1622}
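/*
 *	Illustrative sketch only (not from the original source): how a caller might
 *	ask for a cache sync over a range. It assumes the standard Mach signature
 *	pmap_attribute(pmap, address, size, attribute, &value) and the MATTR_CACHE
 *	selector; "code_start" and "code_len" are hypothetical.
 *
 *		vm_machine_attribute_val_t val = MATTR_VAL_CACHE_SYNC;	/* push dcache, invalidate icache */
 *		kern_return_t kr;
 *
 *		kr = pmap_attribute(kernel_pmap, trunc_page(code_start),
 *			round_page(code_start + code_len) - trunc_page(code_start),
 *			MATTR_CACHE, &val);
 *		if(kr != KERN_SUCCESS) panic("cache sync failed\n");
 */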
1623
1624/*
1625 *	pmap_sync_caches_phys(vm_offset_t pa)
1626 *
1627 * Invalidates all of the instruction cache on a physical page and
1628 * pushes any dirty data from the data cache for the same physical page
1629 */
1630
de355530 1631void pmap_sync_caches_phys(vm_offset_t pa) {
765c9de3
A
1632
1633 spl_t s;
1634
1635 s = splhigh(); /* No interruptions here */
1636 sync_cache(trunc_page(pa), PAGE_SIZE); /* Sync up dem caches */
1637 splx(s); /* Allow interruptions */
1638 return;
1639}
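/*
 *	Typical call pattern (sketch; "pa" is a hypothetical physical address of a
 *	page whose contents were just rewritten, e.g. freshly copied-in code):
 *
 *		pmap_sync_caches_phys(pa);			/* push dirty dcache lines and invalidate the icache for that page */
 */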
1640
1641/*
1642 * pmap_collect
1643 *
1644 * Garbage collects the physical map system for pages that are no longer used.
1645 * It isn't implemented or needed or wanted.
1646 */
1647void
1648pmap_collect(pmap_t pmap)
1649{
1650 return;
1651}
1652
1653/*
1654 * Routine: pmap_activate
1655 * Function:
1656 * Binds the given physical map to the given
1657 * processor, and returns a hardware map description.
1658 * It isn't implemented or needed or wanted.
1659 */
1660void
1661pmap_activate(
1662 pmap_t pmap,
1663 thread_t th,
1664 int which_cpu)
1665{
1666 return;
1667}
1668/*
1669 * pmap_deactivate:
1670 * It isn't implemented or needed or wanted.
1671 */
1672void
1673pmap_deactivate(
1674 pmap_t pmap,
1675 thread_t th,
1676 int which_cpu)
1677{
1678 return;
1679}
1680
1681#if DEBUG
1682
1683/*
1684 * pmap_zero_page
1685 *	pmap_copy_page
1686 *
1687 * are implemented in movc.s, these
1688 * are just wrappers to help debugging
1689 */
1690
1691extern void pmap_zero_page_assembler(vm_offset_t p);
1692extern void pmap_copy_page_assembler(vm_offset_t src, vm_offset_t dst);
1693
1694/*
1695 * pmap_zero_page(pa)
1696 *
1697 * pmap_zero_page zeros the specified (machine independent) page pa.
1698 */
1699void
1700pmap_zero_page(
1701 vm_offset_t p)
1702{
1703 register struct mapping *mp;
1704 register struct phys_entry *pp;
1705
1706 if (pmdebug & (PDB_USER|PDB_ZERO))
1707 kprintf("pmap_zero_page(pa=%x)\n", p);
1708
1709 /*
1710 * XXX can these happen?
1711 */
1712 if (pmap_find_physentry(p) == PHYS_NULL)
1713 panic("zero_page: physaddr out of range");
1714
1715 pmap_zero_page_assembler(p);
1716}
1717
1718/*
1719 * pmap_copy_page(src, dst)
1720 *
1721 * pmap_copy_page copies the specified (machine independent)
1722 * page from physical address src to physical address dst.
1723 *
1724 * We need to invalidate the cache for address dst before
1725 * we do the copy. Apparently there won't be any mappings
1726 * to the dst address normally.
1727 */
1728void
1729pmap_copy_page(
1730 vm_offset_t src,
1731 vm_offset_t dst)
1732{
1733 register struct phys_entry *pp;
1734
1735 if (pmdebug & (PDB_USER|PDB_COPY))
1736 kprintf("pmap_copy_page(spa=%x, dpa=%x)\n", src, dst);
1737 if (pmdebug & PDB_COPY)
1738 kprintf("pmap_copy_page: phys_copy(%x, %x, %x)\n",
1739 src, dst, PAGE_SIZE);
1740
1741 pmap_copy_page_assembler(src, dst);
1742}
1743#endif /* DEBUG */
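/*
 *	Sketch of how the zero/copy primitives are typically used by the VM layer
 *	(hypothetical names; the real callers live in the machine-independent VM code):
 *
 *		pmap_zero_page(new_pa);				/* scrub a page before handing it out */
 *		pmap_copy_page(orig_pa, copy_pa);	/* resolve a copy-on-write fault, physical to physical */
 */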
1744
1745/*
1746 * pmap_pageable(pmap, s, e, pageable)
1747 * Make the specified pages (by pmap, offset)
1748 * pageable (or not) as requested.
1749 *
1750 * A page which is not pageable may not take
1751 * a fault; therefore, its page table entry
1752 * must remain valid for the duration.
1753 *
1754 * This routine is merely advisory; pmap_enter()
1755 * will specify that these pages are to be wired
1756 * down (or not) as appropriate.
1757 *
1758 * (called from vm/vm_fault.c).
1759 */
1760void
1761pmap_pageable(
1762 pmap_t pmap,
1763 vm_offset_t start,
1764 vm_offset_t end,
1765 boolean_t pageable)
1766{
1767
1768 return; /* This is not used... */
1769
1770}
1771/*
1772 * Routine: pmap_change_wiring
1773 *	NOT USED ANYMORE.
1774 */
1775void
1776pmap_change_wiring(
1777 register pmap_t pmap,
1778 vm_offset_t va,
1779 boolean_t wired)
1780{
1781 return; /* This is not used... */
1782}
1783
1784/*
1785 * pmap_modify_pages(pmap, s, e)
1786 * sets the modified bit on all virtual addresses v in the
1787 * virtual address range determined by [s, e] and pmap,
1788 * s and e must be on machine independent page boundaries and
1789 * s must be less than or equal to e.
1790 */
1791void
1792pmap_modify_pages(
1793 pmap_t pmap,
1794 vm_offset_t sva,
1795 vm_offset_t eva)
1796{
1797 spl_t spl;
1798 mapping *mp;
1799
1800#if PMAP_LOWTRACE
1801 dbgTrace(0xF1D00010, (unsigned int)pmap, (unsigned int)(sva|((eva-sva)>>12))); /* (TEST/DEBUG) */
1802#endif
1803
1804#if DEBUG
1805 if (pmdebug & PDB_USER) kprintf("pmap_modify_pages(pmap=%x, sva=%x, eva=%x)\n", pmap, sva, eva);
1806#endif
1807
1808	if (pmap == PMAP_NULL) return;				/* If no pmap, can't do it... */
1809
1810	debugLog2(56, sva, eva);					/* Log pmap_modify_pages call */
1811
1812 spl=splhigh(); /* Don't bother me */
1813
1814 for ( ; sva < eva; sva += PAGE_SIZE) { /* Cycle through the whole range */
1815 mp = hw_lock_phys_vir(pmap->space, sva); /* Lock the physical entry for this mapping */
1816 if(mp) { /* Did we find one? */
1817 if((unsigned int)mp&1) { /* Did the lock on the phys entry time out? */
1818 panic("pmap_modify_pages: timeout obtaining lock on physical entry\n"); /* Scream bloody murder! */
1819 continue;
1820 }
1821 mp = hw_cpv(mp); /* Convert to virtual addressing */
1822 if(!mp->physent) continue; /* No physical entry means an I/O page, we can't set attributes */
1823			mapping_set_mod(mp->physent);			/* Set the modified bit for this page */
1824 hw_unlock_bit((unsigned int *)&mp->physent->phys_link, PHYS_LOCK); /* Unlock the physical entry */
1825 }
1826 }
1827 splx(spl); /* Restore the interrupt level */
1828
1829	debugLog2(57, 0, 0);						/* Log pmap_modify_pages exit */
1830 return; /* Leave... */
1c79356b
A
1831}
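/*
 *	Sketch of a call (hypothetical buffer bounds): mark a range dirty so the
 *	pageout path will write it back even though it was filled by DMA rather
 *	than through a mapping:
 *
 *		pmap_modify_pages(kernel_pmap, trunc_page(buf_start), round_page(buf_start + buf_len));
 */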
1832
1833/*
1834 * pmap_clear_modify(phys)
1835 * clears the hardware modified ("dirty") bit for one
1836 *	machine independent page starting at the given
1837 *	physical address. phys must be aligned on a machine
1838 *	independent page boundary.
1839 */
1840void
1841pmap_clear_modify(vm_offset_t pa)
1842{
1843 register struct phys_entry *pp;
1844 spl_t spl;
1845
1846#if PMAP_LOWTRACE
1847 dbgTrace(0xF1D00011, (unsigned int)pa, 0); /* (TEST/DEBUG) */
1848#endif
1849#if DEBUG
1850 if (pmdebug & PDB_USER)
1851 kprintf("pmap_clear_modify(pa=%x)\n", pa);
1852#endif
1853
1854 pp = pmap_find_physentry(pa); /* Find the physent for this page */
1855 if (pp == PHYS_NULL) return; /* If there isn't one, just leave... */
1856
1857	debugLog2(58, pa, 0);						/* Log pmap_clear_modify call */
1858
1859 spl=splhigh(); /* Don't bother me */
1860
1861 if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Try to get the lock on the physical entry */
1862 panic("pmap_clear_modify: Timeout getting lock on physent at %08X\n", pp); /* Arrrgghhhh! */
1863 splx(spl); /* Restore 'rupts */
1864 return; /* Should die before here */
1865 }
1866
1867 mapping_clr_mod(pp); /* Clear all change bits for physical page */
1868
1869 hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* Unlock the physical entry */
1870 splx(spl); /* Restore the interrupt level */
1871
1872	debugLog2(59, 0, 0);						/* Log pmap_clear_modify exit */
1873}
1874
1875/*
1876 * pmap_is_modified(phys)
1877 * returns TRUE if the given physical page has been modified
1878 * since the last call to pmap_clear_modify().
1879 */
1880boolean_t
1881pmap_is_modified(register vm_offset_t pa)
1882{
1883 register struct phys_entry *pp;
1884 spl_t spl;
1885 boolean_t ret;
1886
1887
1888#if PMAP_LOWTRACE
1889 dbgTrace(0xF1D00012, (unsigned int)pa, 0); /* (TEST/DEBUG) */
1890#endif
1891#if DEBUG
1892 if (pmdebug & PDB_USER)
1893 kprintf("pmap_is_modified(pa=%x)\n", pa);
1894#endif
1895
1896 pp = pmap_find_physentry(pa); /* Find the physent for this page */
1897 if (pp == PHYS_NULL) return(FALSE); /* Just indicate not set... */
1898
1899	debugLog2(60, pa, 0);						/* Log pmap_is_modified call */
1900
1901 spl=splhigh(); /* Don't bother me */
1902
1903 if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Try to get the lock on the physical entry */
1904 panic("pmap_is_modified: Timeout getting lock on physent at %08X\n", pp); /* Arrrgghhhh! */
1905 splx(spl); /* Restore 'rupts */
1906 return 0; /* Should die before here */
1907 }
1908
1909 ret = mapping_tst_mod(pp); /* Check for modified */
1910
1911 hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* Unlock the physical entry */
1912 splx(spl); /* Restore the interrupt level */
1913
1914	debugLog2(61, ret, 0);						/* Log pmap_is_modified exit */
1915
1916 return ret;
1917}
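/*
 *	Typical pageout-style pattern (sketch; "pa" and the pager call are hypothetical):
 *
 *		if(pmap_is_modified(pa)) {			/* dirtied since the last scan? */
 *			pager_write_page(pa);			/* hypothetical: push it to backing store */
 *			pmap_clear_modify(pa);			/* start a new dirty interval */
 *		}
 */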
1918
1919/*
1920 * pmap_clear_reference(phys)
1921 * clears the hardware referenced bit in the given machine
1922 *	independent physical page.
1923 *
1924 */
1925void
1926pmap_clear_reference(vm_offset_t pa)
1927{
1928 register struct phys_entry *pp;
1929 spl_t spl;
1930
1931
1932#if PMAP_LOWTRACE
1933 dbgTrace(0xF1D00013, (unsigned int)pa, 0); /* (TEST/DEBUG) */
1934#endif
1935#if DEBUG
1936 if (pmdebug & PDB_USER)
1937 kprintf("pmap_clear_reference(pa=%x)\n", pa);
1938#endif
1939
1940 pp = pmap_find_physentry(pa); /* Find the physent for this page */
1941 if (pp == PHYS_NULL) return; /* If there isn't one, just leave... */
1942
1943	debugLog2(62, pa, 0);						/* Log pmap_clear_reference call */
1944
1945 spl=splhigh(); /* Don't bother me */
1946 mapping_clr_ref(pp); /* Clear all reference bits for physical page */
1947 splx(spl); /* Restore the interrupt level */
1948
1949	debugLog2(63, 0, 0);						/* Log pmap_clear_reference exit */
1950
1951}
1952
1953/*
1954 * pmap_is_referenced(phys)
1955 * returns TRUE if the given physical page has been referenced
1956 * since the last call to pmap_clear_reference().
1957 */
1958boolean_t
1959pmap_is_referenced(vm_offset_t pa)
1960{
1961 register struct phys_entry *pp;
1962 spl_t spl;
1963 boolean_t ret;
1964
1965
1966#if PMAP_LOWTRACE
1967 dbgTrace(0xF1D00014, (unsigned int)pa, 0); /* (TEST/DEBUG) */
1968#endif
1969#if DEBUG
1970 if (pmdebug & PDB_USER)
1971 kprintf("pmap_is_referenced(pa=%x)\n", pa);
1972#endif
1973
1974 pp = pmap_find_physentry(pa); /* Find the physent for this page */
1975 if (pp == PHYS_NULL) return(FALSE); /* Just indicate not set... */
1976
1977	debugLog2(64, pa, 0);						/* Log pmap_is_referenced call */
1978
1979 spl=splhigh(); /* Don't bother me */
1980
1981 if(!hw_lock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK, LockTimeOut)) { /* Try to get the lock on the physical entry */
1982 panic("pmap_is_referenced: Timeout getting lock on physent at %08X\n", pp); /* Arrrgghhhh! */
1983 splx(spl); /* Restore 'rupts */
1984 return 0; /* Should die before here */
1985 }
1986
1987 ret = mapping_tst_ref(pp); /* Check for referenced */
1988
1989 hw_unlock_bit((unsigned int *)&pp->phys_link, PHYS_LOCK); /* Unlock the physical entry */
1990 splx(spl); /* Restore the interrupt level */
1991
1992	debugLog2(65, ret, 0);						/* Log pmap_is_referenced exit */
1993
1994	return ret;
1995}
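/*
 *	Reference bits are normally consumed by a clock-style ager (sketch;
 *	"recently_used" and "pa" are hypothetical):
 *
 *		boolean_t recently_used = pmap_is_referenced(pa);	/* touched since the last sweep? */
 *		pmap_clear_reference(pa);							/* re-arm the bit for the next sweep */
 */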
1996
1997#if MACH_VM_DEBUG
1998int
1999pmap_list_resident_pages(
2000 register pmap_t pmap,
2001 register vm_offset_t *listp,
2002 register int space)
2003{
2004 return 0;
2005}
2006#endif /* MACH_VM_DEBUG */
2007
2008/*
2009 * Locking:
2010 * spl: VM
2011 */
2012void
2013pmap_copy_part_page(
2014 vm_offset_t src,
2015 vm_offset_t src_offset,
2016 vm_offset_t dst,
2017 vm_offset_t dst_offset,
2018 vm_size_t len)
2019{
2020 register struct phys_entry *pp_src, *pp_dst;
2021 spl_t s;
2022
2023
2024#if PMAP_LOWTRACE
2025 dbgTrace(0xF1D00019, (unsigned int)src+src_offset, (unsigned int)dst+dst_offset); /* (TEST/DEBUG) */
2026 dbgTrace(0xF1D04019, (unsigned int)len, 0); /* (TEST/DEBUG) */
2027#endif
2028 s = splhigh();
2029
2030 assert(((dst & PAGE_MASK)+dst_offset+len) <= PAGE_SIZE);
2031 assert(((src & PAGE_MASK)+src_offset+len) <= PAGE_SIZE);
2032
2033 /*
2034 * Since the source and destination are physical addresses,
2035 * turn off data translation to perform a bcopy() in bcopy_phys().
2036 */
2037 phys_copy((vm_offset_t) src+src_offset,
2038 (vm_offset_t) dst+dst_offset, len);
2039
2040	splx(s);
2041}
2042
2043void
2044pmap_zero_part_page(
2045 vm_offset_t p,
2046 vm_offset_t offset,
2047 vm_size_t len)
2048{
2049 panic("pmap_zero_part_page");
2050}
2051
2052 boolean_t pmap_verify_free(vm_offset_t pa) {
2053
2054 struct phys_entry *pp;
2055
2056#if PMAP_LOWTRACE
2057 dbgTrace(0xF1D00007, (unsigned int)pa, 0); /* (TEST/DEBUG) */
2058#endif
2059
2060#if DEBUG
2061 if (pmdebug & PDB_USER)
2062 kprintf("pmap_verify_free(pa=%x)\n", pa);
2063#endif
2064
2065 if (!pmap_initialized) return(TRUE);
2066
2067 pp = pmap_find_physentry(pa); /* Look up the physical entry */
2068 if (pp == PHYS_NULL) return FALSE; /* If there isn't one, show no mapping... */
2069	return ((mapping *)((unsigned int)pp->phys_link & ~PHYS_FLAGS) == MAPPING_NULL);	/* Otherwise, return TRUE only if there are no mappings to the page... */
2070}
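/*
 *	Sketch of the usual debug check when a physical page goes back on the free
 *	list (hypothetical caller):
 *
 *		assert(pmap_verify_free(pa));		/* nobody should still have it mapped */
 */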
2071
2072
2073/* Determine if we need to switch space and set up for it if so */
2074
2075void pmap_switch(pmap_t map)
2076{
2077 unsigned int i;
2078
2079#if DEBUG
2080 if (watchacts & WA_PCB) {
2081 kprintf("Switching to map at 0x%08x, space=%d\n",
2082 map,map->space);
2083 }
2084#endif /* DEBUG */
2085
2086
2087/* when changing to kernel space, don't bother
2088 * doing anything, the kernel is mapped from here already.
2089 */
2090 if (map->space == PPC_SID_KERNEL) { /* Are we switching into kernel space? */
2091 return; /* If so, we don't do anything... */
2092 }
2093
2094 hw_set_user_space(map); /* Indicate if we need to load the SRs or not */
2095 return; /* Bye, bye, butterfly... */
2096}
2097
2098/*
2099 *	kern_return_t pmap_nest(grand, subord, vaddr, size)
2100 *
2101 * grand = the pmap that we will nest subord into
2102 * subord = the pmap that goes into the grand
2103 * vaddr = start of range in pmap to be inserted
2104 * size = size of range in pmap to be inserted
2105 *
2106 * Inserts a pmap into another. This is used to implement shared segments.
2107 * On the current PPC processors, this is limited to segment (256MB) aligned
2108 * segment sized ranges.
2109 */
2110
2111kern_return_t pmap_nest(pmap_t grand, pmap_t subord, vm_offset_t vaddr, vm_size_t size) {
2112
2113 unsigned int oflags, seg, grandr;
2114 int i;
2115
2116 if(size != 0x10000000) return KERN_INVALID_VALUE; /* We can only do this for 256MB for now */
2117 if(vaddr & 0x0FFFFFFF) return KERN_INVALID_VALUE; /* We can only do this aligned to 256MB */
2118
2119 while(1) { /* Test and set the subordinate flag */
2120 oflags = subord->vflags & ~pmapAltSeg; /* Get old unset value */
2121		if(subord->vflags & pmapAltSeg) {			/* Are we trying to nest one that is already nested? */
2122 panic("pmap_nest: Attempt to nest an already nested pmap\n");
2123 }
2124 if(hw_compare_and_store(oflags, oflags | pmapSubord, &subord->vflags)) break; /* Done if we got it set */
2125 }
2126
2127	simple_lock(&grand->lock);						/* Lock the superior pmap */
2128
2129 if(grand->vflags & pmapSubord) { /* Are we only one level deep? */
2130 simple_unlock(&grand->lock); /* Unlock the superior pmap */
2131 panic("pmap_nest: Attempt to nest into subordinate pmap\n");
2132 return KERN_FAILURE; /* Shame on you */
2133 }
2134
2135 seg = vaddr >> 28; /* Isolate the segment number */
2136 if((0x00008000 >> seg) & grand->vflags) { /* See if it is already in use */
2137 simple_unlock(&grand->lock); /* Unlock the superior pmap */
2138 panic("pmap_nest: Attempt to nest into already nested segment\n");
2139 return KERN_FAILURE; /* Shame on you */
2140 }
2141
2142 grand->pmapPmaps[seg] = subord; /* Set the pointer to the subordinate */
2143 grand->pmapSegs[seg] = SEG_REG_PROT | (seg << 20) | subord->space; /* Set the vsid to the subordinate's vsid */
2144 grand->vflags |= (0x00008000 >> seg); /* Set in-use bit */
2145
2146 grandr = (unsigned int)grand ^ grand->pmapvr; /* Get real address of the grand pmap */
2147
2148	simple_unlock(&grand->lock);						/* Unlock the grand pmap */
2149
2150
2151/*
2152 * Note that the following will force the segment registers to be reloaded following
2153 * the next interrupt on all processors if they are using the pmap we just changed.
2154 *
2155 */
2156
2157
2158 for(i=0; i < real_ncpus; i++) { /* Cycle through processors */
2159 (void)hw_compare_and_store((unsigned int)grandr, 0, &per_proc_info[i].Lastpmap); /* Clear if ours */
2160	}
2161
2162 return KERN_SUCCESS; /* Bye, bye, butterfly... */
2163}
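/*
 *	Sketch of nesting a shared 256MB segment into a task's pmap (the segment
 *	address and the pmap names are hypothetical; both arguments must be
 *	segment aligned and segment sized, as checked above):
 *
 *		kern_return_t kr;
 *
 *		kr = pmap_nest(task_pmap, shared_pmap, 0x70000000, 0x10000000);
 *		if(kr != KERN_SUCCESS) panic("pmap_nest failed\n");
 */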
2164
2165
2166/*
2167 *	kern_return_t pmap_unnest(grand, vaddr, size)
2168 *
2169 *	grand = the pmap from which we will remove the nested subordinate
2170 *	vaddr = start of range in pmap to be removed
2171 *	size = size of range in pmap to be removed
2172 *
2173 * Removes a pmap from another. This is used to implement shared segments.
2174 * On the current PPC processors, this is limited to segment (256MB) aligned
2175 * segment sized ranges.
2176 */
2177
2178 kern_return_t pmap_unnest(pmap_t grand, vm_offset_t vaddr, vm_size_t size) {
2179
2180 unsigned int oflags, seg, grandr, tstamp;
2181 int i, tcpu, mycpu;
2182
2183 if(size != 0x10000000) return KERN_INVALID_VALUE; /* We can only do this for 256MB for now */
2184 if(vaddr & 0x0FFFFFFF) return KERN_INVALID_VALUE; /* We can only do this aligned to 256MB */
2185
2186 simple_lock(&grand->lock); /* Lock the superior pmap */
2187 disable_preemption(); /* It's all for me! */
2188
2189 seg = vaddr >> 28; /* Isolate the segment number */
2190 if(!((0x00008000 >> seg) & grand->vflags)) { /* See if it is already in use */
2191 enable_preemption(); /* Ok, your turn */
2192 simple_unlock(&grand->lock); /* Unlock the superior pmap */
2193 panic("pmap_unnest: Attempt to unnest an unnested segment\n");
2194 return KERN_FAILURE; /* Shame on you */
2195 }
2196
2197 grand->pmapPmaps[seg] = (pmap_t)0; /* Clear the pointer to the subordinate */
2198 grand->pmapSegs[seg] = grand->space; /* Set the pointer to the subordinate's vsid */
2199 grand->pmapSegs[seg] = SEG_REG_PROT | (seg << 20) | grand->space; /* Set the vsid to the grand's vsid */
2200 grand->vflags &= ~(0x00008000 >> seg); /* Clear in-use bit */
2201
2202	grandr = (unsigned int)grand ^ grand->pmapvr;		/* Get real address of the grand pmap */
2203
2204	simple_unlock(&grand->lock);						/* Unlock the superior pmap */
2205
2206/*
2207 * Note that the following will force the segment registers to be reloaded
2208 * on all processors (if they are using the pmap we just changed) before returning.
2209 *
2210 * This is needed. The reason is that until the segment register is
2211 * reloaded, another thread in the same task on a different processor will
2212 * be able to access memory that it isn't allowed to anymore. That can happen
2213 * because access to the subordinate pmap is being removed, but the pmap is still
2214 * valid.
2215 *
2216 * Note that we only kick the other processor if we see that it was using the pmap while we
2217 * were changing it.
2218 */
2219
2220
2221 mycpu = cpu_number(); /* Who am I? Am I just a dream? */
2222 for(i=0; i < real_ncpus; i++) { /* Cycle through processors */
2223 if(hw_compare_and_store((unsigned int)grandr, 0, &per_proc_info[i].Lastpmap)) { /* Clear if ours and kick the other guy if he was using it */
2224 if(i == mycpu) continue; /* Don't diddle ourselves */
2225 tstamp = per_proc_info[i].ruptStamp[1]; /* Save the processor's last interrupt time stamp */
2226 if(cpu_signal(i, SIGPwake, 0, 0) != KERN_SUCCESS) { /* Make sure we see the pmap change */
2227 continue;
2228				}
2229				if(!hw_cpu_wcng(&per_proc_info[i].ruptStamp[1], tstamp, LockTimeOut)) {	/* Wait for the other processor to take the interruption */
2230 panic("pmap_unnest: Other processor (%d) did not see interruption request\n", i);
2231 }
2232 }
2233 }
2234
2235 enable_preemption(); /* Others can run now */
2236 return KERN_SUCCESS; /* Bye, bye, butterfly... */
2237}
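/*
 *	And the matching removal (same hypothetical names as the pmap_nest sketch):
 *
 *		kr = pmap_unnest(task_pmap, 0x70000000, 0x10000000);
 */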
2238
2239
2240 void pmap_ver(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) {
2241
2242 int cnt, i, j, k;
2243 vm_offset_t xx;
2244
2245	if(!pmap) return;
2246
2247 sva = trunc_page(sva);
2248 eva = trunc_page(eva);
2249
2250 for(i = 0; i < (pmapUsageMask + 1); i++) { /* Step through them all */
2251 if((pmap->pmapUsage[i]) > 8192) { /* See if this is a sane number */
2252 panic("pmap_ver: pmapUsage slot for %08X has invalid count (%d) for pmap %08X\n",
2253 i * pmapUsageSize, pmap->pmapUsage[i], pmap);
2254 }
2255	}
2256 j = 0;
2257 while(1) { /* Try multiple times */
2258 cnt = 0;
2259 for(i = 0; i < (pmapUsageMask + 1); i++) { /* Step through them all */
2260 cnt = cnt + pmap->pmapUsage[i]; /* Sum all slots */
2261 }
2262 if(cnt == pmap->stats.resident_count) break; /* We're ok if we match... */
2263
2264 j++;
2265 for(i = 0; i < 100000; i++) {
2266 k = j + i;
2267 }
2268 if(j >= 10) {
2269 panic("pmap_ver: pmapUsage total (%d) does not match resident count (%d) for pmap %08X\n",
2270 cnt, pmap->stats.resident_count, pmap);
2271 }
2272	}
2273
2274 for(xx = sva; xx < eva; xx += PAGE_SIZE) { /* See if any slots not clear */
2275 if(pmap_extract(pmap, xx)) {
2276 panic("pmap_ver: range (%08X to %08X) not empty at %08X for pmap %08X\n",
2277 sva, eva, xx, pmap);
2278 }
2279 }
2280}
2281
2282
2283/* temporary workaround */
2284boolean_t
2285coredumpok(vm_map_t map, vm_offset_t va)
2286{
2287 return TRUE;
2288}
2289