/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
// =============================
// BZERO and MEMSET FOR Mac OS X
// =============================
//
// We use DCBZ, and therefore are dependent on the cache block size (32).
// Bzero and memset need to be in the same file since they are tightly
// coupled, so we can use bzero for memset of 0 without incurring extra
// overhead. (The issue is that bzero must preserve r3 for memset.)
//
// Register usage:
//      r3  = original ptr, not changed since memset returns it
//      r4  = count of bytes to set ("rc")
//      r11 = working operand ptr ("rp")
//      r10 = value to set ("rv")
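//
// For orientation, the strategy the comments above describe looks roughly like
// the C sketch below. This is illustrative only, not part of the original
// source: the function name "bzero_sketch" is hypothetical, and the inner byte
// loop stands in for the DCBZ cache-block zeroing used by the real code.
//
//      #include <stddef.h>
//      #include <stdint.h>
//
//      void bzero_sketch(void *b, size_t len) {
//          unsigned char *p = (unsigned char *)b;
//          size_t align = (size_t)(-(uintptr_t)p) & 31;    // bytes to next 32-byte boundary
//          if (len >= 32) {                                // long enough to use DCBZ
//              len -= align;
//              while (align--) *p++ = 0;                   // align on cache block
//              while (len >= 32) {                         // zero whole cache blocks
//                  for (int i = 0; i < 32; i++) p[i] = 0;  //   (DCBZ does this in one shot)
//                  p += 32;
//                  len -= 32;
//              }
//          }
//          while (len--) *p++ = 0;                         // 0..31 trailing bytes
//      }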
#include <architecture/ppc/asm_help.h>
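// Symbolic names for the working registers. These definitions are an
// assumption, chosen to match the register table above:
#define rc      r4              // count of bytes to set
#define rv      r10             // value to set
#define rp      r11             // working operand ptr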
_bzero:                                 // void bzero(void *b, size_t len);
        cmplwi  cr1,rc,32               // too short for DCBZ?
        li      rv,0                    // value to store is 0 (the shared code below stores rv)
Lbzero1:                                // enter from memset with cr1 and rv set up
        neg     r5,r3                   // start to compute #bytes to align
        mr      rp,r3                   // make copy of operand ptr
        andi.   r6,r5,0x1F              // r6 <- #bytes to align on cache block
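        // (neg + andi. computes (-(uintptr_t)ptr) & 0x1F, i.e. the distance
        //  from ptr up to the next 32-byte boundary, 0 if already aligned)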
        blt-    cr1,Ltail               // <32, so skip DCBZs
        beq-    cr0,Ldcbz               // already aligned

// align on 32-byte boundary
        mtcrf   0x01,r6                 // move align count to cr7 (faster if only 1 cr)
        andi.   r7,r6,16                // test bit 27 by hand
        sub     rc,rc,r6                // adjust length
        bf      31,1f                   // test bits of count
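        // (the alignment stores peel off bytes according to the low bits of the
        //  align count -- roughly "if (n & 1) *p++ = v; if (n & 2) { ... }" in C,
        //  storing a byte, halfword, word, etc. as needed.  mtcrf put bits 28-31
        //  of the count in cr7 so bf/bt can branch on them directly; bit 27, the
        //  16-byte bit, is tested separately with andi. since only one cr field
        //  was loaded)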
// DCBZ 32-byte cache blocks

Ldcbz:
        srwi.   r5,rc,5                 // r5 <- number of cache blocks to zero
        beq     Ltail                   // none, so skip to trailing bytes
        mtctr   r5                      // set up loop count
        andi.   rc,rc,0x1F              // will there be leftovers?
Ldcbz1:
        dcbz    0,rp                    // zero 32 bytes
        addi    rp,rp,32                // bump ptr to next cache block
        bdnz    Ldcbz1                  // loop over all blocks
        beqlr                           // no leftovers so done
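        // (dcbz zeroes the entire 32-byte cache block containing the address
        //  without first reading it from memory, which is why the pointer was
        //  aligned to a cache-block boundary above)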
// store up to 31 trailing bytes
//      rv = value to store (in all 4 bytes)
//      rc = #bytes to store (0..31)
Ltail:
        andi.   r5,rc,16                // bit 27 set in length?
        mtcrf   0x01,rc                 // low 4 bits of length to cr7
        beq     1f                      // test bits of length
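        // (the trailing-store code walks down the bits of the remaining count,
        //  storing progressively smaller pieces -- roughly, in C:
        //      if (n & 16) { store 16 bytes; }
        //      if (n & 8)  { store 8 bytes;  }   ... down to a single byte
        //  with the 8/4/2/1 bits taken from cr7 via bf/bt)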
_memset:                                // void *memset(void *b, int c, size_t len);
        andi.   rv,r4,0xFF              // copy value to working register, test for 0
        mr      rc,r5                   // move length to working register
        cmplwi  cr1,r5,32               // length < 32 ?
        beq     Lbzero1                 // memset of 0 is just a bzero
        rlwimi  rv,rv,8,16,23           // replicate value to low 2 bytes
        mr      rp,r3                   // make working copy of operand ptr
        rlwimi  rv,rv,16,0,15           // value now in all 4 bytes
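        // (the two rlwimi's splat the low byte into all four bytes of rv;
        //  in C terms: rv |= rv << 8; rv |= rv << 16;)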
        blt     cr1,Ltail               // length<32, so use common tail routine
        neg     r5,rp                   // start to compute #bytes to align
        andi.   r6,r5,0x7               // r6 <- #bytes to align on doubleword
        beq-    Lmemset1                // already aligned

// align on 8-byte boundary

        mtcrf   0x01,r6                 // move count to cr7 (faster if only 1 cr)
        sub     rc,rc,r6                // adjust length
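        // (the nonzero-memset path aligns only to 8 bytes: its fill loop uses
        //  8-byte floating-point stores rather than DCBZ, since DCBZ can only
        //  produce zeros)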
// loop on 16-byte blocks

Lmemset1:
        stw     rv,0(rp)                // store first 8 bytes from rv
        stw     rv,4(rp)                //   (as two word stores)
        srwi    r5,rc,4                 // r5 <- #blocks (>=1)
        mtcrf   0x01,rc                 // leftover length to cr7
        mtctr   r5                      // set up loop count
        lfd     f0,0(rp)                // pick up those 8 bytes in a fp register
        b       2f                      // enter loop in middle
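        // (the first doubleword of the first block is already in memory from the
        //  two stw's above, so the loop is entered at its midpoint and the first
        //  iteration only stores the second doubleword)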
1:                                      // loop on 16-byte blocks
        stfd    f0,0(rp)                // first doubleword of block
2:
        stfd    f0,8(rp)                // second doubleword of block
        addi    rp,rp,16                // bump ptr to next block
        bdnz    1b                      // loop until ctr runs out
// store up to 16 trailing bytes (count in cr7)