apple/xnu (xnu-517): osfmk/i386/loose_ends.c
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
#include <mach_assert.h>

#include <string.h>
#include <mach/boolean.h>
#include <mach/i386/vm_types.h>
#include <mach/i386/vm_param.h>
#include <kern/kern_types.h>
#include <kern/misc_protos.h>
#include <vm/pmap.h>
#include <i386/param.h>
#include <i386/misc_protos.h>

#define value_64bit(value)	((value) & 0xFFFFFFFF00000000LL)
#define low32(x)		((unsigned int)((x) & 0x00000000FFFFFFFFLL))

/*
 * Should be rewritten in asm anyway.
 */

void
bzero_phys(addr64_t p, uint32_t len)
{
	bzero((char *)phystokv(low32(p)), len);
}

/*
 * copy 'size' bytes from physical to physical address
 * the caller must validate the physical ranges
 *
 * if flush_action == 0, no cache flush necessary
 * if flush_action == 1, flush the source
 * if flush_action == 2, flush the dest
 * if flush_action == 3, flush both source and dest
 */

extern void flush_dcache(vm_offset_t addr, unsigned count, int phys);

kern_return_t
copyp2p(vm_offset_t source, vm_offset_t dest, unsigned int size, unsigned int flush_action)
{
	switch (flush_action) {
	case 1:
		flush_dcache(source, size, 1);
		break;
	case 2:
		flush_dcache(dest, size, 1);
		break;
	case 3:
		flush_dcache(source, size, 1);
		flush_dcache(dest, size, 1);
		break;
	}

	bcopy_phys((addr64_t)source, (addr64_t)dest, (vm_size_t)size);	/* Do a physical copy */

	switch (flush_action) {
	case 1:
		flush_dcache(source, size, 1);
		break;
	case 2:
		flush_dcache(dest, size, 1);
		break;
	case 3:
		flush_dcache(source, size, 1);
		flush_dcache(dest, size, 1);
		break;
	}

	return KERN_SUCCESS;
}
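
/*
 * Illustrative only, not part of the original file: a minimal sketch of how a
 * caller might use copyp2p() with the flush_action convention documented
 * above. The helper name and the 'src_phys'/'dst_phys' parameters are
 * hypothetical; the physical addresses are assumed to be already validated
 * and page-aligned by the caller.
 */
#if 0	/* example sketch, not compiled */
static void
copyp2p_example(vm_offset_t src_phys, vm_offset_t dst_phys)
{
	/* Copy one page and flush both source and destination (flush_action == 3). */
	(void) copyp2p(src_phys, dst_phys, NBPG, 3);
}
#endif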


/*
 * Copies data from a physical page to a virtual page. This is used to
 * move data from the kernel to user state.
 *
 */
#if 0
kern_return_t
copyp2v(char *from, char *to, unsigned int size)
{
	return (copyout(phystokv(from), to, size));
}
#endif

/*
 * Copies data from a virtual page to a physical page. This is used to
 * move data from the user address space into the kernel.
 *
 */
#if 0
kern_return_t
copyv2p(char *from, char *to, unsigned int size)
{
	return (copyin(from, phystokv(to), size));
}
#endif

/*
 * bcopy_phys - like bcopy but copies from/to physical addresses.
 *              this is trivial since all phys mem is mapped into
 *              kernel virtual space
 */

void
bcopy_phys(addr64_t from, addr64_t to, vm_size_t bytes)
{
	/* this will die horribly if we ever run off the end of a page */
	if (value_64bit(from) || value_64bit(to))
		panic("bcopy_phys: 64 bit value");
	bcopy((char *)phystokv(low32(from)),
	      (char *)phystokv(low32(to)), bytes);
}
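
/*
 * Illustrative only, not part of the original file: a minimal sketch of
 * copying one physical page to another with bcopy_phys(). The helper name and
 * the page numbers 'src_pn'/'dst_pn' are hypothetical; i386_ptob() converts
 * them to the 32-bit physical addresses this implementation expects.
 */
#if 0	/* example sketch, not compiled */
static void
bcopy_phys_example(ppnum_t src_pn, ppnum_t dst_pn)
{
	bcopy_phys((addr64_t)i386_ptob(src_pn),
		   (addr64_t)i386_ptob(dst_pn),
		   (vm_size_t)NBPG);
}
#endif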


/*
 * ovbcopy - like bcopy, but recognizes overlapping ranges and handles
 *           them correctly.
 */

void
ovbcopy(
	const char	*from,
	char		*to,
	vm_size_t	bytes)		/* num bytes to copy */
{
	/* Assume that bcopy copies left-to-right (low addr first). */
	if (from + bytes <= to || to + bytes <= from || to == from)
		bcopy_no_overwrite(from, to, bytes);	/* non-overlapping or no-op */
	else if (from > to)
		bcopy_no_overwrite(from, to, bytes);	/* overlapping but OK */
	else {
		/* to > from: overlapping, and must copy right-to-left. */
		from += bytes - 1;
		to += bytes - 1;
		while (bytes-- > 0)
			*to-- = *from--;
	}
}
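
/*
 * Illustrative only, not part of the original file: a minimal sketch showing
 * why ovbcopy() is needed for overlapping ranges. Shifting a buffer's
 * contents up by one byte makes the destination overlap and sit above the
 * source, so a plain left-to-right copy would smear the data; ovbcopy()
 * copies right-to-left in that case. The helper name and 'buf'/'len' are
 * hypothetical.
 */
#if 0	/* example sketch, not compiled */
static void
ovbcopy_example(char *buf, vm_size_t len)
{
	if (len > 0)
		/* Move buf[0..len-2] to buf[1..len-1]; overlapping, dest above source. */
		ovbcopy(buf, buf + 1, len - 1);
}
#endif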

void
bcopy(
	const char	*from,
	char		*to,
	vm_size_t	bytes)		/* num bytes to copy */
{
	ovbcopy(from, to, bytes);
}

int
bcmp(
	const char	*a,
	const char	*b,
	vm_size_t	len)
{
	if (len == 0)
		return 0;

	do
		if (*a++ != *b++)
			break;
	while (--len);

	return len;
}

int
memcmp(const void *s1, const void *s2, size_t n)
{
	if (n != 0) {
		/* Compare as unsigned char, per the standard memcmp contract. */
		const unsigned char *p1 = s1, *p2 = s2;

		do {
			if (*p1++ != *p2++)
				return (*--p1 - *--p2);
		} while (--n != 0);
	}
	return (0);
}

/*
 * Abstract:
 *	strlen returns the number of characters in "string" preceding
 *	the terminating null character.
 */

size_t
strlen(
	register const char	*string)
{
	register const char	*ret = string;

	while (*string++ != '\0')
		continue;
	return string - 1 - ret;
}

#include <libkern/OSAtomic.h>

uint32_t
hw_atomic_add(
	uint32_t	*dest,
	uint32_t	delt)
{
	uint32_t	oldValue;
	uint32_t	newValue;

	do {
		oldValue = *dest;
		newValue = (oldValue + delt);
	} while (!OSCompareAndSwap((UInt32)oldValue,
				   (UInt32)newValue, (UInt32 *)dest));

	return newValue;
}

uint32_t
hw_atomic_sub(
	uint32_t	*dest,
	uint32_t	delt)
{
	uint32_t	oldValue;
	uint32_t	newValue;

	do {
		oldValue = *dest;
		newValue = (oldValue - delt);
	} while (!OSCompareAndSwap((UInt32)oldValue,
				   (UInt32)newValue, (UInt32 *)dest));

	return newValue;
}

uint32_t
hw_atomic_or(
	uint32_t	*dest,
	uint32_t	mask)
{
	uint32_t	oldValue;
	uint32_t	newValue;

	do {
		oldValue = *dest;
		newValue = (oldValue | mask);
	} while (!OSCompareAndSwap((UInt32)oldValue,
				   (UInt32)newValue, (UInt32 *)dest));

	return newValue;
}

uint32_t
hw_atomic_and(
	uint32_t	*dest,
	uint32_t	mask)
{
	uint32_t	oldValue;
	uint32_t	newValue;

	do {
		oldValue = *dest;
		newValue = (oldValue & mask);
	} while (!OSCompareAndSwap((UInt32)oldValue,
				   (UInt32)newValue, (UInt32 *)dest));

	return newValue;
}

uint32_t
hw_compare_and_store(
	uint32_t	oldval,
	uint32_t	newval,
	uint32_t	*dest)
{
	return OSCompareAndSwap((UInt32)oldval, (UInt32)newval, (UInt32 *)dest);
}
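
/*
 * Illustrative only, not part of the original file: a minimal sketch of the
 * compare-and-swap retry pattern the hw_atomic_* routines above are built on.
 * This hypothetical helper increments a counter but refuses to pass 'limit',
 * retrying whenever another CPU updates the word between the read and the
 * swap.
 */
#if 0	/* example sketch, not compiled */
static boolean_t
bounded_increment_example(uint32_t *counter, uint32_t limit)
{
	uint32_t old_value;

	do {
		old_value = *counter;
		if (old_value >= limit)
			return FALSE;		/* at the limit; nothing to do */
	} while (!hw_compare_and_store(old_value, old_value + 1, counter));

	return TRUE;				/* successfully incremented */
}
#endif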

#if	MACH_ASSERT

/*
 * Machine-dependent routine to fill in an array with up to callstack_max
 * levels of return pc information.
 */
void
machine_callstack(
	natural_t	*buf,
	vm_size_t	callstack_max)
{
}

#endif	/* MACH_ASSERT */


void
fillPage(ppnum_t pa, unsigned int fill)
{
	unsigned int	*addr = (unsigned int *)phystokv(i386_ptob(pa));
	int		i;
	int		cnt = NBPG / sizeof(unsigned int);

	for (i = 0; i < cnt; i++)
		*addr++ = fill;
}

#define cppvPHYS	(cppvPsnk | cppvPsrc)

kern_return_t
copypv(addr64_t source, addr64_t sink, unsigned int size, int which)
{
	char	*src32, *dst32;

	if (value_64bit(source) | value_64bit(sink))
		panic("copypv: 64 bit value");

	src32 = (char *)low32(source);
	dst32 = (char *)low32(sink);

	if (which & cppvFsrc)
		flush_dcache(source, size, 1);	/* If requested, flush source before move */
	if (which & cppvFsnk)
		flush_dcache(sink, size, 1);	/* If requested, flush sink before move */

	switch (which & cppvPHYS) {

	case cppvPHYS:
		/*
		 * both destination and source are physical
		 */
		bcopy_phys(source, sink, (vm_size_t)size);
		break;

	case cppvPsnk:
		/*
		 * destination is physical, source is virtual
		 */
		if (which & cppvKmap)
			/*
			 * source is kernel virtual
			 */
			bcopy(src32, (char *)phystokv(dst32), size);
		else
			/*
			 * source is user virtual
			 */
			copyin(src32, (char *)phystokv(dst32), size);
		break;

	case cppvPsrc:
		/*
		 * source is physical, destination is virtual
		 */
		if (which & cppvKmap)
			/*
			 * destination is kernel virtual
			 */
			bcopy((char *)phystokv(src32), dst32, size);
		else
			/*
			 * destination is user virtual
			 */
			copyout((char *)phystokv(src32), dst32, size);
		break;

	default:
		panic("copypv: both virtual");
	}

	if (which & cppvFsrc)
		flush_dcache(source, size, 1);	/* If requested, flush source after the move */
	if (which & cppvFsnk)
		flush_dcache(sink, size, 1);	/* If requested, flush sink after the move */

	return KERN_SUCCESS;
}
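
/*
 * Illustrative only, not part of the original file: a minimal sketch of
 * calling copypv() to move data from a user virtual address into a physical
 * page, with the sink flushed after the copy. The helper name and
 * 'user_addr'/'dst_phys' are hypothetical; both addresses are assumed to be
 * validated by the caller and representable in 32 bits, since copypv()
 * panics on 64-bit values.
 */
#if 0	/* example sketch, not compiled */
static kern_return_t
copypv_example(addr64_t user_addr, addr64_t dst_phys, unsigned int size)
{
	/* cppvPsnk: sink is physical (source stays virtual); cppvFsnk: flush the sink. */
	return copypv(user_addr, dst_phys, size, cppvPsnk | cppvFsnk);
}
#endif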


void
flush_dcache64(addr64_t addr, unsigned count, int phys)
{
}

void
invalidate_icache64(addr64_t addr, unsigned cnt, int phys)
{
}


void
switch_to_serial_console(void)
{
}

addr64_t	vm_last_addr;

void
mapping_set_mod(ppnum_t pn)
{
	pmap_set_modify(pn);
}

boolean_t
mutex_preblock(
	mutex_t		*mutex,
	thread_t	thread)
{
	return (FALSE);
}