/*
 * memchr - find a character in a memory zone
 *
 * Copyright (c) 2020-2022, Arm Limited.
 * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
 */

/* Assumptions:
 *
 * ARMv8-a, AArch64, Advanced SIMD.
 * MTE compatible.
 */

#include "asmdefs.h"

#define srcin		x0
#define chrin		w1
#define cntin		x2
#define result		x0

#define src		x3
#define cntrem		x4
#define synd		x5
#define shift		x6
#define tmp		x7

#define vrepchr		v0
#define qdata		q1
#define vdata		v1
#define vhas_chr	v2
#define vend		v3
#define dend		d3

/*
   Core algorithm:
   For each 16-byte chunk we calculate a 64-bit nibble mask value with four
   bits per byte.  We take four bits of every comparison byte with a
   shift-right-and-narrow-by-4 (SHRN) instruction.  Since the bits in the
   nibble mask reflect the order in which things occur in the original string,
   counting leading zeros identifies exactly which byte matched.  */
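
/* Worked example (illustrative only, not part of the build; the constants
   below are made up for the example): after CMEQ, each matching byte of
   vhas_chr is 0xff.  SHRN #4 keeps bits 4..11 of every 16-bit lane, so byte k
   of the chunk ends up as nibble k (bits 4k..4k+3) of the 64-bit syndrome.
   If only byte 5 matches, the syndrome is 0x0000000000f00000; RBIT + CLZ
   yields 20 (the count of trailing zero bits), and 20 >> 2 recovers byte
   index 5:

	uint64_t synd = 0x0000000000f00000;	// match at byte 5 only
	unsigned idx = ctz64 (synd) >> 2;	// ctz64: e.g. __builtin_ctzll
	// idx == 5

   The first-chunk path additionally shifts the syndrome right by
   4 * (srcin & 15) so that bytes before the requested start are ignored.  */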

ENTRY (__memchr_aarch64_mte)
	PTR_ARG (0)
	SIZE_ARG (2)
	bic	src, srcin, 15		/* Align src down to 16 bytes.  */
	cbz	cntin, L(nomatch)
	ld1	{vdata.16b}, [src]
	dup	vrepchr.16b, chrin
	cmeq	vhas_chr.16b, vdata.16b, vrepchr.16b
	lsl	shift, srcin, 2		/* Four syndrome bits per byte.  */
	shrn	vend.8b, vhas_chr.8h, 4		/* 128->64 */
	fmov	synd, dend
	lsr	synd, synd, shift	/* Discard bytes before srcin.  */
	cbz	synd, L(start_loop)

	rbit	synd, synd
	clz	synd, synd
	cmp	cntin, synd, lsr 2
	add	result, srcin, synd, lsr 2
	csel	result, result, xzr, hi
	ret

	.p2align 3
L(start_loop):
	/* Compute the number of bytes remaining beyond the first chunk.  */
	sub	tmp, src, srcin
	add	tmp, tmp, 17
	subs	cntrem, cntin, tmp
	b.lo	L(nomatch)

	/* Make sure that it won't overread by a 16-byte chunk.  */
	tbz	cntrem, 4, L(loop32_2)
	sub	src, src, 16
	.p2align 4
L(loop32):
	ldr	qdata, [src, 32]!
	cmeq	vhas_chr.16b, vdata.16b, vrepchr.16b
	umaxp	vend.16b, vhas_chr.16b, vhas_chr.16b	/* 128->64 */
	fmov	synd, dend
	cbnz	synd, L(end)

L(loop32_2):
	ldr	qdata, [src, 16]
	cmeq	vhas_chr.16b, vdata.16b, vrepchr.16b
	subs	cntrem, cntrem, 32
	b.lo	L(end_2)
	umaxp	vend.16b, vhas_chr.16b, vhas_chr.16b	/* 128->64 */
	fmov	synd, dend
	cbz	synd, L(loop32)
L(end_2):
	add	src, src, 16
L(end):
	shrn	vend.8b, vhas_chr.8h, 4		/* 128->64 */
	sub	cntrem, src, srcin	/* Bytes consumed before this chunk.  */
	fmov	synd, dend
	sub	cntrem, cntin, cntrem	/* Bytes remaining from this chunk on.  */
#ifndef __AARCH64EB__
	rbit	synd, synd
#endif
	clz	synd, synd
	cmp	cntrem, synd, lsr 2	/* Is the match within the buffer?  */
	add	result, src, synd, lsr 2
	csel	result, result, xzr, hi
	ret

L(nomatch):
	mov	result, 0
	ret

END (__memchr_aarch64_mte)
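
/* Reference model (an illustrative sketch, not part of this file and not how
   the routine is implemented): the entry point behaves like C memchr, so any
   byte match within the first cntin bytes returns its address, else NULL.

	#include <stddef.h>

	static void *
	memchr_ref (const void *s, int c, size_t n)
	{
	  const unsigned char *p = s;
	  for (size_t i = 0; i < n; i++)
	    if (p[i] == (unsigned char) c)
	      return (void *) (p + i);
	  return NULL;
	}

   Unlike the reference loop, the assembly reads whole aligned 16-byte chunks,
   which may touch bytes outside [s, s + n); every such load stays within a
   16-byte granule holding at least one in-bounds byte, which is what makes
   the routine MTE compatible.  */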