/*
 * memrchr - find last character in a memory zone.
 *
 * Copyright (c) 2020-2022, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

/* Assumptions:
 *
 * ARMv8-a, AArch64, Advanced SIMD.
 * MTE compatible.
 */

#include "asmdefs.h"

#define srcin		x0
#define chrin		w1
#define cntin		x2
#define result		x0

#define src		x3
#define cntrem		x4
#define synd		x5
#define shift		x6
#define tmp		x7
#define end		x8
#define endm1		x9

#define vrepchr		v0
#define qdata		q1
#define vdata		v1
#define vhas_chr	v2
#define vend		v3
#define dend		d3

/*
   Core algorithm:
   For each 16-byte chunk we calculate a 64-bit nibble mask with four bits per
   byte. We take 4 bits of every comparison byte using a shift-right-and-narrow
   (shrn) by 4 instruction. Since the bits in the nibble mask reflect the order
   of the bytes in the original string, counting leading zeros identifies
   exactly which byte matched.  */
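
/* A scalar sketch of the syndrome computation (illustrative only, not part
   of the build): each matching byte contributes one set nibble, in chunk
   order, so the position of the last match falls out of a clz.

     uint64_t synd = 0;
     for (int i = 0; i < 16; i++)
       if (chunk[i] == c)
	 synd |= (uint64_t) 0xf << (4 * i);

   On little-endian, the last match in the chunk is at offset
   15 - (clz (synd) >> 2).  */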

ENTRY (__memrchr_aarch64)
	PTR_ARG (0)
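	/* end points one past the buffer; endm1 at its last byte.  Align src
	   down to the 16-byte chunk holding the last byte: aligned loads never
	   cross a 16-byte granule, which keeps the first load MTE safe.  */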
	add	end, srcin, cntin
	sub	endm1, end, 1
	bic	src, endm1, 15
	cbz	cntin, L(nomatch)
	ld1	{vdata.16b}, [src]
	dup	vrepchr.16b, chrin
	cmeq	vhas_chr.16b, vdata.16b, vrepchr.16b
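	/* Build the nibble mask, then shift out the nibbles of the bytes at
	   or beyond end (the lsl amount is taken modulo 64, so shift is
	   effectively 4 * (16 - end % 16) mod 64).  After the lsl the nibble
	   for the last valid byte sits in the top bits.  */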
	neg	shift, end, lsl 2
	shrn	vend.8b, vhas_chr.8h, 4		/* 128->64 */
	fmov	synd, dend
	lsl	synd, synd, shift
	cbz	synd, L(start_loop)

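	/* Match in the first chunk: clz / 4 is the distance from endm1 back
	   to the last match.  The chunk may extend below srcin, so check that
	   the match really lies inside the buffer.  */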
	clz	synd, synd
	sub	result, endm1, synd, lsr 2
	cmp	cntin, synd, lsr 2
	csel	result, result, xzr, hi
	ret

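	/* Never executed; pads out the code so L(start_loop) is aligned.  */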
	nop
L(start_loop):
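	/* cntrem is the number of bytes below src still to be searched; if
	   src is already at or below srcin, the first chunk covered it all.  */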
	subs	cntrem, src, srcin
	b.ls	L(nomatch)

	/* Make sure the loop will not over-read by a whole 16-byte chunk:
	   pick the loop entry point so the 32-byte iterations line up with
	   the buffer start.  */
	sub	cntrem, cntrem, 1
	tbz	cntrem, 4, L(loop32_2)
	add	src, src, 16

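	/* Main loop: scan 32 bytes per iteration in two 16-byte halves.
	   umaxp collapses the 128-bit comparison to 64 bits cheaply; the
	   precise nibble mask is only built at L(end) once a match is seen.  */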
	.p2align 5
L(loop32):
	ldr	qdata, [src, -32]!
	cmeq	vhas_chr.16b, vdata.16b, vrepchr.16b
	umaxp	vend.16b, vhas_chr.16b, vhas_chr.16b		/* 128->64 */
	fmov	synd, dend
	cbnz	synd, L(end)

L(loop32_2):
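	/* Second half: load the chunk below src without writeback.  When
	   cntrem underflows (b.lo), fewer than 32 bytes were left, so finish
	   with whatever this load compared.  */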
	ldr	qdata, [src, -16]
	subs	cntrem, cntrem, 32
	cmeq	vhas_chr.16b, vdata.16b, vrepchr.16b
	b.lo	L(end_2)
	umaxp	vend.16b, vhas_chr.16b, vhas_chr.16b		/* 128->64 */
	fmov	synd, dend
	cbz	synd, L(loop32)
L(end_2):
	sub	src, src, 16
L(end):
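	/* src points at the chunk holding the match (if any); tmp at that
	   chunk's last byte.  tmp - clz / 4 is then the address of the last
	   match, which in the final, possibly partial, chunk may fall below
	   srcin, hence the bounds check before returning it.  */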
	shrn	vend.8b, vhas_chr.8h, 4		/* 128->64 */
	fmov	synd, dend

	add	tmp, src, 15
#ifdef __AARCH64EB__
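	/* On big-endian the lanes land in synd in reverse order; rbit flips
	   the bits so that clz still finds the last match.  */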
	rbit	synd, synd
#endif
	clz	synd, synd
	sub	tmp, tmp, synd, lsr 2
	cmp	tmp, srcin
	csel	result, tmp, xzr, hs
	ret

L(nomatch):
	mov	result, 0
	ret

END (__memrchr_aarch64)