/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <arm/proc_reg.h>
/*
 * A reasonably well-optimized bzero/memset. Should work equally well on arm11 and arm9 based
 * cores.
 *
 * The algorithm is to align the destination pointer on a 32 byte boundary and then
 * blast data 64 bytes at a time, in two stores of 32 bytes per loop.
 */
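/*
 * Roughly, in C-like pseudocode (an illustrative sketch of the flow
 * below, not a literal translation):
 *
 *	store 1-15 bytes until dst is 16 byte aligned, then 0 or 16 more
 *	    until it is 32 byte aligned;
 *	while (len >= 64) { two 32 byte block stores; len -= 64; }
 *	while (len >= 16) { two  8 byte block stores; len -= 16; }
 *	store the remaining 0-15 bytes, steered by len bits [3:0];
 */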
	.text
	.align 2

/*
 * void *secure_memset(void * addr, int pattern, size_t length)
 *
 * It is important that this function remains defined in assembly to avoid
 * compiler optimizations.
 */
	.globl _secure_memset
_secure_memset:
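/*
 * (Rationale: a compiler that can prove a buffer is never read again,
 * e.g. when scrubbing a secret just before freeing it, is allowed to
 * delete the memset entirely. An opaque assembly entry point cannot be
 * dead-store-eliminated; it simply falls through into _memset below.)
 */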
/* void *memset(void *ptr, int c, size_t len); */
	.globl _memset
_memset:
	/* move len into r1, unpack c into r2 */
	mov	r12, r2
	and	r1, r1, #0xff
	orr	r1, r1, r1, lsl #8
	orr	r2, r1, r1, lsl #16
	mov	r1, r12
	b	L_memset
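	/*
	 * The two orrs replicate the fill byte across a full word: for
	 * c = 0xAB, r1 becomes 0x0000ABAB after the first orr and r2
	 * becomes 0xABABABAB after the second, so every word-sized store
	 * below writes four copies of c.
	 */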
/* void bzero(void *ptr, size_t len); */
	.globl _bzero
_bzero:
	/* zero out r2 so we can be just like memset(0) */
	mov	r2, #0

L_memset:
	/* move the base pointer into r12 and leave r0 alone so that we return the original pointer */
	mov	r12, r0
	/* copy r2 into r3 for 64-bit stores */
	mov	r3, r2
	/* check for zero len */
	cmp	r1, #0
	bxeq	lr
	/* fall back to a bytewise store for less than 32 bytes */
	cmp	r1, #32
	blt	L_bytewise
	/* check for 32 byte unaligned ptr */
	tst	r12, #0x1f
	bne	L_unaligned
	/* make sure we have at least 64 bytes to zero */
	cmp	r1, #64
	blt	L_lessthan64aligned
	/* >= 64 bytes of len, 32 byte aligned */
L_64ormorealigned:
	/* we need some registers, avoid r7 (frame pointer) and r9 (thread register) */
	stmfd	sp!, { r4-r6, r8, r10-r11 }
	mov	r4, r2
	mov	r5, r2
	mov	r6, r2
	mov	r8, r2
	mov	r10, r2
	mov	r11, r2
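	/*
	 * With the pattern replicated into r4-r6, r8 and r10-r11, the
	 * eight registers { r2-r6, r8, r10-r11 } let a single stmia
	 * write 32 bytes; r7 and r9 stay untouched per the note above.
	 */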
	/* pre-subtract 64 from the len to avoid an extra compare in the loop */
	sub	r1, r1, #64

L_64loop:
	stmia	r12!, { r2-r6, r8, r10-r11 }
	subs	r1, r1, #64
	stmia	r12!, { r2-r6, r8, r10-r11 }
	bge	L_64loop
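	/*
	 * Pre-biasing len by -64 lets the single subs in the loop both
	 * count down and produce the bge condition, avoiding a separate
	 * cmp per iteration; the adds below undoes the bias when the
	 * loop falls through.
	 */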
	/* restore the saved regs */
	ldmfd	sp!, { r4-r6, r8, r10-r11 }
	/* check for completion (had previously subtracted an extra 64 from len) */
	adds	r1, r1, #64
	bxeq	lr

L_lessthan64aligned:
	/* do we have 16 or more bytes left */
	cmp	r1, #16
	stmiage	r12!, { r2-r3 }
	stmiage	r12!, { r2-r3 }
	subsge	r1, r1, #16
	bgt	L_lessthan64aligned
	/* store 0 to 15 bytes */
	mov	r1, r1, lsl #28		/* move the remaining len bits [3:0] to the flags area of cpsr */
	msr	cpsr_f, r1

	stmiami	r12!, { r2-r3 }		/* n is set, store 8 bytes */
	streq	r2, [r12], #4		/* z is set, store 4 bytes */
	strhcs	r2, [r12], #2		/* c is set, store 2 bytes */
	strbvs	r2, [r12], #1		/* v is set, store 1 byte */
	bx	lr
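	/*
	 * The trick above: the cpsr flag bits are N=31, Z=30, C=29, V=28,
	 * so len << 28 maps len bit 3 to N, bit 2 to Z, bit 1 to C and
	 * bit 0 to V. The four conditional stores then consume 8/4/2/1
	 * bytes, covering any residue of 0-15 with no branches. E.g.
	 * len = 13 (0b1101) sets N, Z and V: 8 + 4 + 1 bytes are stored.
	 */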
L_bytewise:
	/* bytewise copy, 2 bytes at a time, alignment not guaranteed */
	subs	r1, r1, #2
	strb	r2, [r12], #1
	strbpl	r2, [r12], #1
	bhi	L_bytewise
	bx	lr
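	/*
	 * subs/pl/hi handle odd lengths without a separate test: an odd
	 * final byte drives r1 negative (N set), so the second strb is
	 * skipped, and hi (carry set, not zero) loops only while bytes
	 * remain after the pair of stores.
	 */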
L_unaligned:
	/* unaligned on 32 byte boundary, store 1-15 bytes until we're 16 byte aligned */
	mov	r3, r12, lsl #28
	rsb	r3, r3, #0x00000000
	msr	cpsr_f, r3

	strbvs	r2, [r12], #1		/* v is set, unaligned in the 1s column */
	strhcs	r2, [r12], #2		/* c is set, unaligned in the 2s column */
	streq	r2, [r12], #4		/* z is set, unaligned in the 4s column */
	strmi	r2, [r12], #4		/* n is set, unaligned in the 8s column */
	strmi	r2, [r12], #4
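	/*
	 * -(ptr << 28) leaves ((-ptr) & 0xf), the distance to the next
	 * 16 byte boundary, in bits [31:28], which land exactly on the
	 * N/Z/C/V flags consumed by the conditional stores above. The 8s
	 * column takes two word stores because r3 holds the shifted
	 * pointer here rather than the pattern.
	 */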
	subs	r1, r1, r3, lsr #28
	bxeq	lr
	/* we had previously trashed r3, restore it */
	mov	r3, r2
	/* now make sure we're 32 byte aligned */
	tst	r12, #(1 << 4)
	stmiane	r12!, { r2-r3 }
	stmiane	r12!, { r2-r3 }
	subne	r1, r1, #16
	/* we're now aligned, check for >= 64 bytes left */
	cmp	r1, #64
	bge	L_64ormorealigned
	b	L_lessthan64aligned