/*
 * strchrnul - find a character or nul in a string
 *
 * Copyright (c) 2020-2022, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

/* Assumptions:
 *
 * ARMv8-a, AArch64, Advanced SIMD.
 * MTE compatible.
 */

#include "asmdefs.h"

#define srcin		x0
#define chrin		w1
#define result		x0

#define src		x2
#define tmp1		x1
#define tmp2		x3

#define vrepchr		v0
#define vdata		v1
#define qdata		q1
#define vhas_nul	v2
#define vhas_chr	v3
#define vend		v4
#define dend		d4

/*
   Core algorithm:
   For each 16-byte chunk we calculate a 64-bit nibble mask value with four bits
   per byte.  We take 4 bits of every comparison byte with the shift right and
   narrow by 4 (shrn) instruction.  Since the bits in the nibble mask reflect
   the order in which things occur in the original string, counting the
   trailing zeros (rbit followed by clz) identifies exactly which byte
   matched.  */
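
/* For illustration only (hypothetical helper, little-endian): the mask
   computed below for one 16-byte chunk is equivalent to this C model,
   where source byte i maps to mask bits [4*i, 4*i + 3]:

     uint64_t chunk_mask (const uint8_t *chunk16, uint8_t c)
     {
       uint64_t mask = 0;
       for (int i = 0; i < 16; i++)
	 if (chunk16[i] == c || chunk16[i] == 0)
	   mask |= (uint64_t) 0xf << (i * 4);
       return mask;
     }

   E.g. for the chunk "a\0cdefghijklmnop" and c == 'd', mask == 0xf0f0;
   rbit + clz then yield 4, and 4 >> 2 == 1, the index of the NUL.  */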

ENTRY (__strchrnul_aarch64_mte)
	PTR_ARG (0)
	bic	src, srcin, 15		/* Align src down to 16 bytes.  */
	dup	vrepchr.16b, chrin
	ld1	{vdata.16b}, [src]
	cmeq	vhas_chr.16b, vdata.16b, vrepchr.16b
	cmhs	vhas_chr.16b, vhas_chr.16b, vdata.16b	/* Also set NUL bytes:
						   0x00 >= b only if b == 0.  */
	lsl	tmp2, srcin, 2		/* Shift below = 4 * (srcin % 16),
					   as lsr takes the count mod 64.  */
	shrn	vend.8b, vhas_chr.8h, 4		/* 128->64 */
	fmov	tmp1, dend
	lsr	tmp1, tmp1, tmp2	/* Mask padding bits.  */
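	/* Example (a sketch): if srcin is 3 bytes past its aligned base,
	   tmp2 % 64 == 12, so the lsr above drops the 3 * 4 mask bits
	   belonging to the bytes before srcin.  */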
	cbz	tmp1, L(loop)

	rbit	tmp1, tmp1		/* Reverse so clz counts trailing zeros.  */
	clz	tmp1, tmp1
	add	result, srcin, tmp1, lsr 2	/* tmp1 >> 2 is the offset from srcin.  */
	ret

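/* Main loop, unrolled to handle two 16-byte chunks per iteration.  Every
   load below is 16-byte aligned and therefore stays within a single
   16-byte MTE tag granule, one the string is already known to reach,
   so the read-ahead remains MTE compatible.  */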
	.p2align 4
L(loop):
	ldr	qdata, [src, 16]
	cmeq	vhas_chr.16b, vdata.16b, vrepchr.16b
	cmhs	vhas_chr.16b, vhas_chr.16b, vdata.16b
	umaxp	vend.16b, vhas_chr.16b, vhas_chr.16b	/* 128->64 any-match test.  */
	fmov	tmp1, dend
	cbnz	tmp1, L(end)
	ldr	qdata, [src, 32]!	/* Pre-index writeback: src += 32.  */
	cmeq	vhas_chr.16b, vdata.16b, vrepchr.16b
	cmhs	vhas_chr.16b, vhas_chr.16b, vdata.16b
	umaxp	vend.16b, vhas_chr.16b, vhas_chr.16b	/* 128->64 */
	fmov	tmp1, dend
	cbz	tmp1, L(loop)
	sub	src, src, 16		/* Cancel the add at L(end).  */
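	/* In both paths the matching chunk now starts at src + 16: the cbnz
	   above branches with src unchanged and the match in [src + 16],
	   while the fall-through path matched at the written-back src and
	   the sub above rewound it by 16.  */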
L(end):
	shrn	vend.8b, vhas_chr.8h, 4		/* 128->64 */
	add	src, src, 16		/* src points at the matching chunk.  */
	fmov	tmp1, dend
#ifndef __AARCH64EB__
	rbit	tmp1, tmp1		/* Reverse so clz counts trailing zeros.  */
#endif
	clz	tmp1, tmp1
	add	result, src, tmp1, lsr 2	/* tmp1 >> 2 is the byte index.  */
	ret

END (__strchrnul_aarch64_mte)
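
/* For reference (a sketch; follows the standard strchrnul contract),
   the routine can be declared from C as

     char *__strchrnul_aarch64_mte (const char *s, int c);

   returning a pointer to the first occurrence of c in s, or to the
   terminating NUL if c does not occur.  */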