/*
 * Copyright (c) 2003 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#define ASSEMBLER
#include <sys/appleapiopts.h>
#include <ppc/asm.h>
#include <machine/cpu_capabilities.h>
#include <machine/commpage.h>

        .text
        .align  2

/* *********************
 * * M E M S E T _ G 4 *
 * *********************
 *
 * This is a subroutine called by Libc memset and memset_pattern for large nonzero
 * operands (zero operands are funneled into bzero.)  This version is for
 * 32-bit processors with a 32-byte cache line and Altivec.
 *
 * Registers at entry:
 *      r4 = count of bytes to store (must be >= 32)
 *      r8 = ptr to the 1st byte to store (16-byte aligned)
 *      r9 = ptr to 16-byte pattern to store (16-byte aligned)
 * When we return:
 *      r3 = not changed, since memset returns it
 *      r4 = bytes remaining to store (will be <32)
 *      r7 = not changed
 *      r8 = ptr to next byte to store (still 16-byte aligned)
 *     r12 = not changed (holds return value for memset)
 */
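/*
 * Editorial note (not in the original source): in C terms the contract
 * behaves roughly like the sketch below, where dst and pat are 16-byte
 * aligned and count >= 32.  The name memset_g4_model is hypothetical,
 * and the exact residual can differ when the dcba path needs an extra
 * 16-byte aligning store; only "residual < 32" is guaranteed.
 *
 *      #include <string.h>
 *      #include <stddef.h>
 *
 *      static size_t memset_g4_model(unsigned char *dst,
 *                      const unsigned char *pat, size_t count)
 *      {
 *              while (count >= 32) {               // cf. the LShort loop
 *                      memcpy(dst, pat, 16);       // stvx v0,0,r8
 *                      memcpy(dst + 16, pat, 16);  // stvx v0,r5,r8
 *                      dst += 32;
 *                      count -= 32;
 *              }
 *              return count;                       // remaining bytes, < 32
 *      }
 */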

#define kBig    (3*64)              // big enough to warrant using dcba (NB: must be >= 3*64)

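// Editorial note (not in the original source): kBig = 192 bytes.  The
// threshold must be >= 3*64 because the aligning store below can consume
// 16 bytes, and the dcba loop is primed with two dcba's and runs
// "1-too-few" times, so at least two 64-byte chunks must remain when the
// loop is set up.
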
        .align  4
memset_g4:
        cmplwi  cr1,r4,kBig         // big enough to warrant using dcba?
        mfspr   r2,vrsave           // we'll be using VRs
        oris    r0,r2,0x8000        // we use vr0
        andi.   r5,r8,0x10          // is ptr 32-byte aligned?
        mtspr   vrsave,r0           // mark vr0 in use
        li      r5,16               // get offsets for "stvx"
        lvx     v0,0,r9             // load the pattern into v0
        li      r6,32               // another stvx offset
        blt     cr1,LShort          // not big enough to bother with dcba
        li      r9,48               // last stvx offset (pattern ptr no longer needed)

// cache line align

        beq     2f                  // already aligned
        stvx    v0,0,r8             // store another 16 bytes to align
        addi    r8,r8,16
        subi    r4,r4,16

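// Editorial note (not in the original source): at entry r8 is 16-byte
// aligned, so its low five bits are 0x00 or 0x10.  The andi. above set
// cr0 to "not equal" in the 0x10 case, and the single extra quadword
// store then brings r8 to the 32-byte cache-line boundary that the
// dcba/stvx loop below relies on.
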
// Set up for inner loop.
2:
        srwi    r0,r4,6             // get count of 64-byte chunks (>=2)
        dcba    0,r8                // pre-allocate first cache line (possibly nop'd)
        rlwinm  r4,r4,0,0x3F        // mask down to residual count (0..63)
        subic   r0,r0,1             // loop 1-too-few times
        li      r10,64              // get offsets to DCBA one chunk ahead
        li      r11,64+32
        mtctr   r0
        dcba    r6,r8               // zero 2nd cache line (possibly nop'd)
        b       3f                  // enter DCBA loop

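// Editorial example (not in the original source): with r4 = 184 here,
// srwi yields 2 chunks and rlwinm leaves 56 residual bytes.  CTR gets 1,
// so the loop below runs once and the peeled copy after it stores the
// second, already-DCBA'd chunk; LShort then stores one 32-byte chunk
// and returns with r4 = 24.
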
// Loop over 64-byte chunks.  We DCBA one chunk ahead, which is a little faster.
// Note that some G4s do not benefit from the DCBAs.  We nop them in that case.

        .align  4
3:
        dcba    r10,r8              // zero one 64-byte chunk ahead (possibly nop'd)
        dcba    r11,r8
        stvx    v0,0,r8             // store this 64-byte chunk...
        stvx    v0,r5,r8
        stvx    v0,r6,r8
        stvx    v0,r9,r8            // ...16 bytes at a time
        addi    r8,r8,64
        bdnz+   3b

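// Editorial note (not in the original source): the loop stops one chunk
// early because its dcba's reach 64 and 96 bytes past r8; running it for
// the final chunk would pre-allocate cache lines beyond the region being
// filled.  The final chunk's lines were dcba'd on the previous iteration,
// so it is stored below without further dcba's.
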
// Last chunk, which we've already DCBAd.

        stvx    v0,0,r8
        stvx    v0,r5,r8
        stvx    v0,r6,r8
        stvx    v0,r9,r8
        addi    r8,r8,64

// loop over 32-byte chunks at end
LShort:
        srwi.   r0,r4,5             // get count of 32-byte chunks
        rlwinm  r4,r4,0,0x1F        // mask down to residual count (0..31)
        beq     7f                  // no chunks so done
        mtctr   r0
6:
        stvx    v0,0,r8
        stvx    v0,r5,r8
        addi    r8,r8,32
        bdnz    6b
7:
        mtspr   vrsave,r2           // restore caller's vrsave
        blr

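// Editorial note (not in the original source): the descriptor below
// registers this routine for the _COMM_PAGE_MEMSET_PATTERN slot on
// processors with a 32-byte cache line and Altivec.  The kCommPageDCBA
// flag lets the kernel turn the dcba instructions above into nops on
// processors that do not benefit from them (the "possibly nop'd" notes).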
        COMMPAGE_DESCRIPTOR(memset_g4,_COMM_PAGE_MEMSET_PATTERN,kCache32+kHasAltivec,0, \
                kCommPageDCBA+kCommPage32)