/*
 * Copyright (c) 2013
 * MIPS Technologies, Inc., California.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the MIPS Technologies, Inc., nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE MIPS TECHNOLOGIES, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE MIPS TECHNOLOGIES, INC. BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifdef ANDROID_CHANGES
# include "machine/asm.h"
# include "machine/regdef.h"
# define PREFETCH_STORE_HINT PREFETCH_HINT_PREPAREFORSTORE
#elif _LIBC
# include <sysdep.h>
# include <regdef.h>
# include <sys/asm.h>
# define PREFETCH_STORE_HINT PREFETCH_HINT_PREPAREFORSTORE
#elif _COMPILING_NEWLIB
# include "machine/asm.h"
# include "machine/regdef.h"
# define PREFETCH_STORE_HINT PREFETCH_HINT_PREPAREFORSTORE
#else
# include <regdef.h>
# include <sys/asm.h>
#endif

/* Check to see if the MIPS architecture we are compiling for supports
   prefetching.  */

#if (__mips == 4) || (__mips == 5) || (__mips == 32) || (__mips == 64)
# ifndef DISABLE_PREFETCH
#  define USE_PREFETCH
# endif
#endif

#if defined(_MIPS_SIM) && ((_MIPS_SIM == _ABI64) || (_MIPS_SIM == _ABIN32))
# ifndef DISABLE_DOUBLE
#  define USE_DOUBLE
# endif
#endif

#ifndef USE_DOUBLE
# ifndef DISABLE_DOUBLE_ALIGN
#  define DOUBLE_ALIGN
# endif
#endif

/* Some asm.h files do not have the L macro definition.  */
#ifndef L
# if _MIPS_SIM == _ABIO32
#  define L(label) $L ## label
# else
#  define L(label) .L ## label
# endif
#endif

/* Some asm.h files do not have the PTR_ADDIU macro definition.  */
#ifndef PTR_ADDIU
# ifdef USE_DOUBLE
#  define PTR_ADDIU daddiu
# else
#  define PTR_ADDIU addiu
# endif
#endif

/* New R6 instructions that may not be in asm.h.  */
#ifndef PTR_LSA
# if _MIPS_SIM == _ABI64
#  define PTR_LSA dlsa
# else
#  define PTR_LSA lsa
# endif
#endif

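/* Note on LSA for readers unfamiliar with R6: "lsa rd, rs, rt, sa"
   computes rd = (rs << sa) + rt.  The "PTR_LSA t9,t2,t9,2" used below
   therefore advances t9 by t2 * 4 bytes, i.e. one 4-byte branch per
   entry of the L(atable) jump table.  */
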
/* Using PREFETCH_HINT_PREPAREFORSTORE instead of PREFETCH_STORE
   or PREFETCH_STORE_STREAMED offers a large performance advantage
   but PREPAREFORSTORE has some special restrictions to consider.

   Prefetch with the 'prepare for store' hint does not copy a memory
   location into the cache, it just allocates a cache line and zeros
   it out.  This means that if you do not write to the entire cache
   line before writing it out to memory some data will get zeroed out
   when the cache line is written back to memory and data will be lost.

   There are ifdef'ed sections of this memset to make sure that it does not
   do prefetches on cache lines that are not going to be completely written.
   This code is only needed and only used when PREFETCH_STORE_HINT is set to
   PREFETCH_HINT_PREPAREFORSTORE.  This code assumes that cache lines are
   less than MAX_PREFETCH_SIZE bytes and if the cache line is larger it will
   not work correctly.  */

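/* A concrete illustration of the hazard, assuming a 32-byte cache line:
   if only the first 16 bytes of a line fall inside the memset region,
   a PREPAREFORSTORE prefetch of that line zero-allocates all 32 bytes,
   and the untouched 16 bytes are later written back as zeros, clobbering
   memory beyond the region.  The prefetch-limit checks below exist to
   avoid exactly this case.  */
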
#ifdef USE_PREFETCH
# define PREFETCH_HINT_STORE 1
# define PREFETCH_HINT_STORE_STREAMED 5
# define PREFETCH_HINT_STORE_RETAINED 7
# define PREFETCH_HINT_PREPAREFORSTORE 30

/* If we have not picked out what hints to use at this point use the
   standard load and store prefetch hints.  */
# ifndef PREFETCH_STORE_HINT
#  define PREFETCH_STORE_HINT PREFETCH_HINT_STORE
# endif

/* We double everything when USE_DOUBLE is true so we do 2 prefetches to
   get 64 bytes in that case.  The assumption is that each individual
   prefetch brings in 32 bytes.  */
# ifdef USE_DOUBLE
#  define PREFETCH_CHUNK 64
#  define PREFETCH_FOR_STORE(chunk, reg) \
    pref PREFETCH_STORE_HINT, (chunk)*64(reg); \
    pref PREFETCH_STORE_HINT, ((chunk)*64)+32(reg)
# else
#  define PREFETCH_CHUNK 32
#  define PREFETCH_FOR_STORE(chunk, reg) \
    pref PREFETCH_STORE_HINT, (chunk)*32(reg)
# endif

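/* As a worked example, with USE_DOUBLE and PREFETCH_STORE_HINT equal to
   PREFETCH_HINT_PREPAREFORSTORE (30), PREFETCH_FOR_STORE (4, a0) expands
   to:
       pref 30, 256(a0)
       pref 30, 288(a0)
   preparing the 64-byte chunk that lies 4 chunks ahead of a0.  */
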
/* MAX_PREFETCH_SIZE is the maximum size of a prefetch, it must not be less
   than PREFETCH_CHUNK, the assumed size of each prefetch.  If the real size
   of a prefetch is greater than MAX_PREFETCH_SIZE and the PREPAREFORSTORE
   hint is used, the code will not work correctly.  If PREPAREFORSTORE is not
   used then MAX_PREFETCH_SIZE does not matter.  */
# define MAX_PREFETCH_SIZE 128
/* PREFETCH_LIMIT is set based on the fact that we never use an offset greater
   than 5 on a STORE prefetch and that a single prefetch can never be larger
   than MAX_PREFETCH_SIZE.  We add the extra 32 when USE_DOUBLE is set because
   we actually do two prefetches in that case, one 32 bytes after the other.  */
# ifdef USE_DOUBLE
#  define PREFETCH_LIMIT (5 * PREFETCH_CHUNK) + 32 + MAX_PREFETCH_SIZE
# else
#  define PREFETCH_LIMIT (5 * PREFETCH_CHUNK) + MAX_PREFETCH_SIZE
# endif

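/* Working the numbers: with USE_DOUBLE, PREFETCH_LIMIT is
   5 * 64 + 32 + 128 = 480 bytes; in the 32-bit case it is
   5 * 32 + 128 = 288 bytes.  Any prefetch issued at least this far from
   the end of the buffer cannot touch a cache line past the region.  */
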
# if (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE) \
    && ((PREFETCH_CHUNK * 4) < MAX_PREFETCH_SIZE)
/* We cannot handle this because the initial prefetches may fetch bytes that
   are before the buffer being copied.  We start copies with an offset
   of 4, so we must avoid this situation when using PREPAREFORSTORE.  */
#  error "PREFETCH_CHUNK is too large and/or MAX_PREFETCH_SIZE is too small."
# endif
#else /* USE_PREFETCH not defined */
# define PREFETCH_FOR_STORE(offset, reg)
#endif

#if __mips_isa_rev > 5
# if (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
#  undef PREFETCH_STORE_HINT
#  define PREFETCH_STORE_HINT PREFETCH_HINT_STORE_STREAMED
# endif
# define R6_CODE
#endif

/* Allow the routine to be named something else if desired.  */
#ifndef MEMSET_NAME
# define MEMSET_NAME memset
#endif

/* We load/store 64 bits at a time when USE_DOUBLE is true.
   The C_ prefix stands for CHUNK and is used to avoid macro name
   conflicts with system header files.  */

#ifdef USE_DOUBLE
# define C_ST	sd
# if __MIPSEB
#  define C_STHI	sdl	/* high part is left in big-endian  */
# else
#  define C_STHI	sdr	/* high part is right in little-endian  */
# endif
#else
# define C_ST	sw
# if __MIPSEB
#  define C_STHI	swl	/* high part is left in big-endian  */
# else
#  define C_STHI	swr	/* high part is right in little-endian  */
# endif
#endif

/* Bookkeeping values for 32 vs. 64 bit mode.  */
#ifdef USE_DOUBLE
# define NSIZE 8
# define NSIZEMASK 0x3f
# define NSIZEDMASK 0x7f
#else
# define NSIZE 4
# define NSIZEMASK 0x1f
# define NSIZEDMASK 0x3f
#endif
#define UNIT(unit) ((unit)*NSIZE)
#define UNITM1(unit) (((unit)*NSIZE)-1)

#ifdef ANDROID_CHANGES
LEAF(MEMSET_NAME,0)
#else
LEAF(MEMSET_NAME)
#endif

	.set	nomips16
	.set	noreorder
/* If the size is less than 2*NSIZE (8 or 16), go to L(lastb).  Regardless of
   size, copy dst pointer to v0 for the return value.  */
	slti	t2,a2,(2 * NSIZE)
	bne	t2,zero,L(lastb)
	move	v0,a0

/* If memset value is not zero, we copy it to all the bytes in a 32 or 64
   bit word.  */
	beq	a1,zero,L(set0)	/* If memset value is zero no smear  */
	PTR_SUBU a3,zero,a0
	nop

/* smear byte into 32 or 64 bit word */
#if ((__mips == 64) || (__mips == 32)) && (__mips_isa_rev >= 2)
# ifdef USE_DOUBLE
	dins	a1, a1, 8, 8	/* Replicate fill byte into half-word.  */
	dins	a1, a1, 16, 16	/* Replicate fill byte into word.  */
	dins	a1, a1, 32, 32	/* Replicate fill byte into dbl word.  */
# else
	ins	a1, a1, 8, 8	/* Replicate fill byte into half-word.  */
	ins	a1, a1, 16, 16	/* Replicate fill byte into word.  */
# endif
#else
# ifdef USE_DOUBLE
	and	a1,0xff
	dsll	t2,a1,8
	or	a1,t2
	dsll	t2,a1,16
	or	a1,t2
	dsll	t2,a1,32
	or	a1,t2
# else
	and	a1,0xff
	sll	t2,a1,8
	or	a1,t2
	sll	t2,a1,16
	or	a1,t2
# endif
#endif

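/* In C terms, the smear above is roughly (32-bit variant shown):
       v &= 0xff;        -> 0x000000ab
       v |= v << 8;      -> 0x0000abab
       v |= v << 16;     -> 0xabababab
   with one extra step, v |= v << 32, in the USE_DOUBLE case.  */
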
/* If the destination address is not aligned do a partial store to get it
   aligned.  If it is already aligned just jump to L(aligned).  */
L(set0):
#ifndef R6_CODE
	andi	t2,a3,(NSIZE-1)		/* word-unaligned address?  */
	beq	t2,zero,L(aligned)	/* t2 is the unalignment count  */
	PTR_SUBU a2,a2,t2
	C_STHI	a1,0(a0)
	PTR_ADDU a0,a0,t2
#else /* R6_CODE */
	andi	t2,a0,(NSIZE-1)
	lapc	t9,L(atable)
	PTR_LSA	t9,t2,t9,2
	jrc	t9
L(atable):
	bc	L(aligned)
# ifdef USE_DOUBLE
	bc	L(lb7)
	bc	L(lb6)
	bc	L(lb5)
	bc	L(lb4)
# endif
	bc	L(lb3)
	bc	L(lb2)
	bc	L(lb1)
L(lb7):
	sb	a1,6(a0)
L(lb6):
	sb	a1,5(a0)
L(lb5):
	sb	a1,4(a0)
L(lb4):
	sb	a1,3(a0)
L(lb3):
	sb	a1,2(a0)
L(lb2):
	sb	a1,1(a0)
L(lb1):
	sb	a1,0(a0)

	li	t9,NSIZE
	subu	t2,t9,t2
	PTR_SUBU a2,a2,t2
	PTR_ADDU a0,a0,t2
#endif /* R6_CODE */

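/* Worked example of the R6 dispatch above, for the 32-bit case (NSIZE=4)
   with a0 ending in binary ...01: t2 = 1, so jrc lands on the second
   L(atable) entry, bc L(lb3), which stores the fill byte at offsets 2, 1
   and 0.  That is NSIZE - t2 = 3 bytes, after which a0 is advanced and a2
   reduced by 3, leaving a0 word-aligned.  */
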
L(aligned):
/* If USE_DOUBLE is not set we may still want to align the data on a 16
   byte boundary instead of an 8 byte boundary to maximize the opportunity
   of proAptiv chips to do memory bonding (combining two sequential 4
   byte stores into one 8 byte store).  We know there are at least 4 bytes
   left to store or we would have jumped to L(lastb) earlier in the code.  */
#ifdef DOUBLE_ALIGN
	andi	t2,a3,4
	beq	t2,zero,L(double_aligned)
	PTR_SUBU a2,a2,t2
	sw	a1,0(a0)
	PTR_ADDU a0,a0,t2
L(double_aligned):
#endif

/* Now the destination is word (or double word) aligned.  Set a2 to count
   how many bytes we have to store after all the 64/128 byte chunks are
   stored, and a3 to the dest pointer after all the 64/128 byte chunks
   have been stored.  We will loop, incrementing a0 until it equals a3.  */
	andi	t8,a2,NSIZEDMASK	/* any whole 64-byte/128-byte chunks?  */
	beq	a2,t8,L(chkw)		/* if a2==t8, no 64-byte/128-byte chunks  */
	PTR_SUBU a3,a2,t8		/* subtract from a2 the remainder  */
	PTR_ADDU a3,a0,a3		/* Now a3 is the final dst after loop  */

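/* For instance, with USE_DOUBLE (NSIZEDMASK = 0x7f) and a2 = 300:
   t8 = 300 & 0x7f = 44, so a3 = a0 + 256 and the loop below runs twice,
   storing 128 bytes per iteration; the remaining 44 bytes are handled
   by L(chkw) and onward.  */
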
/* When in the loop we may prefetch with the 'prepare to store' hint,
   in which case address a0+x must not be past the "t0-32" address.  This
   means: for x=128 the last "safe" a0 address is "t0-160".  Alternatively,
   for x=64 the last "safe" a0 address is "t0-96".  In the current version we
   will use "prefetch hint,128(a0)", so "t0-160" is the limit.  */
#if defined(USE_PREFETCH) \
    && (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
	PTR_ADDU t0,a0,a2		/* t0 is the "past the end" address  */
	PTR_SUBU t9,t0,PREFETCH_LIMIT	/* t9 is the "last safe pref" address  */
#endif
#if defined(USE_PREFETCH) \
    && (PREFETCH_STORE_HINT != PREFETCH_HINT_PREPAREFORSTORE)
	PREFETCH_FOR_STORE (1, a0)
	PREFETCH_FOR_STORE (2, a0)
	PREFETCH_FOR_STORE (3, a0)
#endif

L(loop16w):
#if defined(USE_PREFETCH) \
    && (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
	sltu	v1,t9,a0		/* If a0 > t9 don't use next prefetch  */
	bgtz	v1,L(skip_pref)
	nop
#endif
#ifndef R6_CODE
	PREFETCH_FOR_STORE (4, a0)
	PREFETCH_FOR_STORE (5, a0)
#else
	PREFETCH_FOR_STORE (2, a0)
#endif
L(skip_pref):
	C_ST	a1,UNIT(0)(a0)
	C_ST	a1,UNIT(1)(a0)
	C_ST	a1,UNIT(2)(a0)
	C_ST	a1,UNIT(3)(a0)
	C_ST	a1,UNIT(4)(a0)
	C_ST	a1,UNIT(5)(a0)
	C_ST	a1,UNIT(6)(a0)
	C_ST	a1,UNIT(7)(a0)
	C_ST	a1,UNIT(8)(a0)
	C_ST	a1,UNIT(9)(a0)
	C_ST	a1,UNIT(10)(a0)
	C_ST	a1,UNIT(11)(a0)
	C_ST	a1,UNIT(12)(a0)
	C_ST	a1,UNIT(13)(a0)
	C_ST	a1,UNIT(14)(a0)
	C_ST	a1,UNIT(15)(a0)
	PTR_ADDIU a0,a0,UNIT(16)	/* adding 64/128 to dest  */
	bne	a0,a3,L(loop16w)
	nop
	move	a2,t8

/* Here we have dest word-aligned but less than 64 or 128 bytes to go.
   Check for a 32 (64) byte chunk and store one if there is one.  Otherwise
   jump down to L(chk1w) to handle the tail end of the store.  */
L(chkw):
	andi	t8,a2,NSIZEMASK		/* is there a 32-byte/64-byte chunk?  */
					/* t8 is the remainder count past 32-bytes  */
	beq	a2,t8,L(chk1w)		/* when a2==t8, no 32-byte chunk  */
	nop
	C_ST	a1,UNIT(0)(a0)
	C_ST	a1,UNIT(1)(a0)
	C_ST	a1,UNIT(2)(a0)
	C_ST	a1,UNIT(3)(a0)
	C_ST	a1,UNIT(4)(a0)
	C_ST	a1,UNIT(5)(a0)
	C_ST	a1,UNIT(6)(a0)
	C_ST	a1,UNIT(7)(a0)
	PTR_ADDIU a0,a0,UNIT(8)

/* Here we have less than 32 (64) bytes to set.  Set up for a loop to
   store one word (or double word) at a time.  Set a2 to count how many
   bytes we have to store after all the word (or double word) chunks are
   stored and a3 to the dest pointer after all the (d)word chunks have
   been stored.  We will loop, incrementing a0 until a0 equals a3.  */
L(chk1w):
	andi	a2,t8,(NSIZE-1)	/* a2 is the remainder past one (d)word chunks  */
	beq	a2,t8,L(lastb)
	PTR_SUBU a3,t8,a2	/* a3 is count of bytes in one (d)word chunks  */
	PTR_ADDU a3,a0,a3	/* a3 is the dst address after loop  */

/* storing in words (4-byte or 8-byte chunks) */
L(wordCopy_loop):
	PTR_ADDIU a0,a0,UNIT(1)
	bne	a0,a3,L(wordCopy_loop)
	C_ST	a1,UNIT(-1)(a0)

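/* To trace the tail handling, suppose NSIZE = 4 and t8 = 13 on entry to
   L(chk1w): a2 = 13 & 3 = 1 and a3 = a0 + 12, so L(wordCopy_loop) issues
   three word stores, and the final byte is left for L(lastb) below.  */
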
/* Store the last 8 (or 16) bytes.  */
L(lastb):
	blez	a2,L(leave)
	PTR_ADDU a3,a0,a2	/* a3 is the last dst address  */
L(lastbloop):
	PTR_ADDIU a0,a0,1
	bne	a0,a3,L(lastbloop)
	sb	a1,-1(a0)
L(leave):
	j	ra
	nop

	.set	at
	.set	reorder
END(MEMSET_NAME)
#ifndef ANDROID_CHANGES
# ifdef _LIBC
libc_hidden_builtin_def (MEMSET_NAME)
# endif
#endif