#!/usr/bin/env perl
##
##  Copyright (c) 2013 The WebM project authors. All Rights Reserved.
##
##  Use of this source code is governed by a BSD-style license
##  that can be found in the LICENSE file in the root of the source
##  tree. An additional intellectual property rights grant can be found
##  in the file PATENTS.  All contributing project authors may
##  be found in the AUTHORS file in the root of the source tree.
##

package thumb;

sub FixThumbInstructions($)
{
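    # Each s/// below has no explicit binding, so it operates on the
    # global $_ (not on the declared argument); the caller is presumably
    # expected to place one line of ARM assembly in $_ before calling.
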
    # Write additions with shifts, such as "add r10, r11, lsl #8",
    # in three operand form, "add r10, r10, r11, lsl #8".
    s/(add\s+)(r\d+),\s*(r\d+),\s*(lsl #\d+)/$1$2, $2, $3, $4/g;

    # Convert additions with a non-constant shift into a sequence
    # with left shift, addition and a right shift (to restore the
    # register to the original value). Currently the right shift
    # isn't necessary in the code base since the values in these
    # registers aren't used, but doing the shift for consistency.
    # This converts instructions such as "add r12, r12, r5, lsl r4"
    # into the sequence "lsl r5, r4", "add r12, r12, r5", "lsr r5, r4".
    s/^(\s*)(add)(\s+)(r\d+),\s*(r\d+),\s*(r\d+),\s*lsl (r\d+)/$1lsl$3$6, $7\n$1$2$3$4, $5, $6\n$1lsr$3$6, $7/g;

    # Convert loads with right shifts in the indexing into a
    # sequence of an add, load and sub. This converts
31    # "ldrb r4, [r9, lr, asr #1]" into "add r9, r9, lr, asr #1",
32    # "ldrb r9, [r9]", "sub r9, r9, lr, asr #1".
    s/^(\s*)(ldrb)(\s+)(r\d+),\s*\[(\w+),\s*(\w+),\s*(asr #\d+)\]/$1add $3$5, $5, $6, $7\n$1$2$3$4, [$5]\n$1sub $3$5, $5, $6, $7/g;

    # Convert register indexing with writeback into a separate add
    # instruction. This converts "ldrb r12, [r1, r2]!" into
    # "ldrb r12, [r1, r2]", "add r1, r1, r2".
    s/^(\s*)(ldrb)(\s+)(r\d+),\s*\[(\w+),\s*(\w+)\]!/$1$2$3$4, [$5, $6]\n$1add $3$5, $6/g;

    # Convert negative register indexing into separate sub/add instructions.
    # This converts "ldrne r4, [src, -pstep, lsl #1]" into
    # "subne src, src, pstep, lsl #1", "ldrne r4, [src]",
43    # "addne src, src, pstep, lsl #1". In a couple of cases where
44    # this is used, it's used for two subsequent load instructions,
45    # where a hand-written version of it could merge two subsequent
46    # add and sub instructions.
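    # The destination register group in the pattern is optional so that
    # the same rule also matches pld, which takes no destination register.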
    s/^(\s*)((ldr|str|pld)(ne)?)(\s+)(r\d+,\s*)?\[(\w+), -([^\]]+)\]/$1sub$4$5$7, $7, $8\n$1$2$5$6\[$7\]\n$1add$4$5$7, $7, $8/g;

    # Convert register post indexing to a separate add instruction.
    # This converts "ldrneb r9, [r0], r2" into "ldrneb r9, [r0]",
    # "addne r0, r0, r2".
    s/^(\s*)((ldr|str)(ne)?[bhd]?)(\s+)(\w+),(\s*\w+,)?\s*\[(\w+)\],\s*(\w+)/$1$2$5$6,$7 [$8]\n$1add$4$5$8, $8, $9/g;

    # Convert "mov pc, lr" into "bx lr", since the former only works
    # for switching from arm to thumb (and only in armv7), but not
    # from thumb to arm.
    s/mov(\s*)pc\s*,\s*lr/bx$1lr/g;
}

1;
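
# A minimal usage sketch (hypothetical driver, not part of this module):
# an assembly converter can load this file and let the rules above
# rewrite each input line held in $_.
#
#   require "thumb.pm";
#   while (<STDIN>) {
#       thumb::FixThumbInstructions($_);
#       print;
#   }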