/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved.
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

// Include SSSE3 CDEF code only for 32-bit x86, to support Valgrind.
// For normal use, we require SSE4.1, so cdef_*_sse4_1 will be used instead of
// these functions. However, 32-bit Valgrind does not support SSE4.1, so we
// include a fallback to SSSE3 to improve performance.

#include "config/aom_config.h"

#if !AOM_ARCH_X86
#error "cdef_block_ssse3.c is included for compatibility with 32-bit x86 only"
#endif  // !AOM_ARCH_X86

#include "aom_dsp/aom_simd.h"
#define SIMD_FUNC(name) name##_ssse3
#include "av1/common/cdef_block_simd.h"
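
// Defining SIMD_FUNC before including cdef_block_simd.h stamps out the shared
// CDEF SIMD kernels from that header with an _ssse3 suffix, so this
// translation unit provides SSSE3 builds of the same kernels that are
// otherwise compiled as cdef_*_sse4_1.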
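
// Find the CDEF filter direction for two adjacent 8x8 blocks. This fallback
// has no fused dual-block kernel; each block simply gets its own
// cdef_find_dir() pass.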
void cdef_find_dir_dual_ssse3(const uint16_t *img1, const uint16_t *img2,
                              int stride, int32_t *var_out_1st,
                              int32_t *var_out_2nd, int coeff_shift,
                              int *out_dir_1st_8x8, int *out_dir_2nd_8x8) {
  // Process first 8x8.
  *out_dir_1st_8x8 = cdef_find_dir(img1, stride, var_out_1st, coeff_shift);

  // Process second 8x8.
  *out_dir_2nd_8x8 = cdef_find_dir(img2, stride, var_out_2nd, coeff_shift);
}

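// Widen an 8-bit pixel rectangle to 16 bits. Each row is processed eight
// pixels at a time with v64 loads and v128_unpack_u8_s16(), and a scalar loop
// handles the remaining columns when the width is not a multiple of eight.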
void cdef_copy_rect8_8bit_to_16bit_ssse3(uint16_t *dst, int dstride,
                                         const uint8_t *src, int sstride,
                                         int width, int height) {
  int j;
  for (int i = 0; i < height; i++) {
    // Widen eight pixels per iteration over the multiple-of-8 part of the row.
    for (j = 0; j < (width & ~0x7); j += 8) {
      v64 row = v64_load_unaligned(&src[i * sstride + j]);
      v128_store_unaligned(&dst[i * dstride + j], v128_unpack_u8_s16(row));
    }
    // Scalar tail for the last (width % 8) pixels.
    for (; j < width; j++) {
      dst[i * dstride + j] = src[i * sstride + j];
    }
  }
}
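
// Illustrative use (hypothetical buffers, not part of this file): widening an
// 8x8 block, filled elsewhere, into a 16-bit working copy before filtering.
//
//   uint8_t src[8 * 8];   // 8-bit source pixels, stride 8
//   uint16_t dst[8 * 8];  // 16-bit working copy, stride 8
//   cdef_copy_rect8_8bit_to_16bit_ssse3(dst, 8, src, 8, 8, 8);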