upload android base code part3

This commit is contained in:
August 2018-08-08 16:48:17 +08:00
parent 71b83c22f1
commit b9e30e05b1
15122 changed files with 2089659 additions and 0 deletions

View file

@ -0,0 +1,49 @@
# Copyright (C) 2016 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Makefile for the Art fast interpreter. This is not currently
# integrated into the build system.
#
SHELL := /bin/sh
# Build system has TARGET_ARCH=arm, but we can support the exact architecture
# if it is worthwhile.
#
# To generate sources:
# for arch in arm arm64 x86 x86_64 mips mips64
# do
#   TARGET_ARCH_EXT=$arch make -f Makefile-mterp
# done
#
OUTPUT_DIR := out

# Accumulate all possible dependencies for the generated files in a very
# conservative fashion.  If it's not one of the generated files in "out",
# assume it's a dependency.
# NOTE: no trailing backslash after the $(shell ...) line — a dangling
# continuation would splice the next comment line into this assignment.
SOURCE_DEPS := \
	$(shell find . -path ./$(OUTPUT_DIR) -prune -o -type f -print)

# Source files generated by the script.  There's always one C and one
# assembly file, though in practice one or the other could be empty.
GEN_SOURCES := \
	$(OUTPUT_DIR)/interp_asm_$(TARGET_ARCH_EXT).S

# "target" is a command, not a file; mark it phony so a stray file named
# "target" cannot make it silently up to date.
.PHONY: target
target: $(GEN_SOURCES)

$(GEN_SOURCES): $(SOURCE_DEPS)
	@mkdir -p $(OUTPUT_DIR)
	./gen_mterp.py $(TARGET_ARCH_EXT) $(OUTPUT_DIR)

View file

@ -0,0 +1,197 @@
Art "mterp" README
NOTE: Find rebuilding instructions at the bottom of this file.
==== Overview ====
Every configuration has a "config-*" file that controls how the sources
are generated. The sources are written into the "out" directory, where
they are picked up by the Android build system.
The best way to become familiar with the interpreter is to look at the
generated files in the "out" directory.
==== Config file format ====
The config files are parsed from top to bottom. Each line in the file
may be blank, hold a comment (line starts with '#'), or be a command.
The commands are:
handler-style <computed-goto|jump-table>
Specify which style of interpreter to generate. In computed-goto,
each handler is allocated a fixed region, allowing transitions to
be done via table-start-address + (opcode * handler-size). With
jump-table style, handlers may be of any length, and the generated
table is an array of pointers to the handlers. This command is required,
and must be the first command in the config file.
handler-size <bytes>
Specify the size of the fixed region, in bytes. On most platforms
this will need to be a power of 2. For jump-table implementations,
this command is ignored.
import <filename>
The specified file is included immediately, in its entirety. No
substitutions are performed. ".cpp" and ".h" files are copied to the
C output, ".S" files are copied to the asm output.
asm-alt-stub <filename>
When present, this command will cause the generation of an alternate
set of entry points (for computed-goto interpreters) or an alternate
jump table (for jump-table interpreters).
fallback-stub <filename>
Specifies a file to be used for the special FALLBACK tag on the "op"
command below. Intended to be used to transfer control to an alternate
interpreter to single-step a not-yet-implemented opcode. Note: should
not be used on RETURN-class instructions.
op-start <directory>
Indicates the start of the opcode list. Must precede any "op"
commands. The specified directory is the default location to pull
instruction files from.
op <opcode> <directory>|FALLBACK
Can only appear after "op-start" and before "op-end". Overrides the
default source file location of the specified opcode. The opcode
definition will come from the specified file, e.g. "op OP_NOP arm"
will load from "arm/OP_NOP.S". A substitution dictionary will be
applied (see below). If the special "FALLBACK" token is used instead of
a directory name, the source file specified in fallback-stub will instead
be used for this opcode.
alt <opcode> <directory>
Can only appear after "op-start" and before "op-end". Similar to the
"op" command above, but denotes a source file to override the entry
in the alternate handler table. The opcode definition will come from
the specified file, e.g. "alt OP_NOP arm" will load from
"arm/ALT_OP_NOP.S". A substitution dictionary will be applied
(see below).
op-end
Indicates the end of the opcode list. All kNumPackedOpcodes
opcodes are emitted when this is seen, followed by any code that
didn't fit inside the fixed-size instruction handler space.
The order of "op" and "alt" directives are not significant; the generation
tool will extract ordering info from the VM sources.
Typically the form in which most opcodes currently exist is used in
the "op-start" directive.
==== Instruction file format ====
The assembly instruction files are simply fragments of assembly sources.
The starting label will be provided by the generation tool, as will
declarations for the segment type and alignment. The expected target
assembler is GNU "as", but others will work (may require fiddling with
some of the pseudo-ops emitted by the generation tool).
A substitution dictionary is applied to all opcode fragments as they are
appended to the output. Substitutions can look like "$value" or "${value}".
The dictionary always includes:
$opcode - opcode name, e.g. "OP_NOP"
$opnum - opcode number, e.g. 0 for OP_NOP
$handler_size_bytes - max size of an instruction handler, in bytes
$handler_size_bits - max size of an instruction handler, log 2
Both C and assembly sources will be passed through the C pre-processor,
so you can take advantage of C-style comments and preprocessor directives
like "#define".
Some generator operations are available.
%include "filename" [subst-dict]
Includes the file, which should look like "arm/OP_NOP.S". You can
specify values for the substitution dictionary, using standard Python
syntax. For example, this:
%include "arm/unop.S" {"result":"r1"}
would insert "arm/unop.S" at the current file position, replacing
occurrences of "$result" with "r1".
%default <subst-dict>
Specify default substitution dictionary values, using standard Python
syntax. Useful if you want to have a "base" version and variants.
%break
Identifies the split between the main portion of the instruction
handler (which must fit in "handler-size" bytes) and the "sister"
code, which is appended to the end of the instruction handler block.
In jump table implementations, %break is ignored.
The generation tool does *not* print a warning if your instructions
exceed "handler-size", but the VM will abort on startup if it detects an
oversized handler. On architectures with fixed-width instructions this
is easy to work with; on others you will need to count bytes.
==== Using C constants from assembly sources ====
The file "art/runtime/asm_support.h" has some definitions for constant
values, structure sizes, and struct member offsets. The format is fairly
restricted, as simple macros are used to massage it for use with both C
(where it is verified) and assembly (where the definitions are used).
If a constant in the file becomes out of sync, the VM will log an error
message and abort during startup.
==== Development tips ====
If you need to debug the initial piece of an opcode handler, and your
debug code expands it beyond the handler size limit, you can insert a
generic header at the top:
b ${opcode}_start
%break
${opcode}_start:
If you already have a %break, it's okay to leave it in place -- the second
%break is ignored.
==== Rebuilding ====
If you change any of the source file fragments, you need to rebuild the
combined source files in the "out" directory. Make sure the files in
"out" are editable, then:
$ cd mterp
$ ./rebuild.sh
The ultimate goal is to have the build system generate the necessary
output files without requiring this separate step, but we're not yet
ready to require Python in the build.
==== Interpreter Control ====
The mterp fast interpreter achieves much of its performance advantage
over the C++ interpreter through its efficient mechanism of
transitioning from one Dalvik bytecode to the next. Mterp for ARM targets
uses a computed-goto mechanism, in which the handler entrypoints are
located at the base of the handler table + (opcode * 128).
In normal operation, the dedicated register rIBASE
(r8 for ARM, edx for x86) holds a mainHandlerTable. If we need to switch
to a mode that requires inter-instruction checking, rIBASE is changed
to altHandlerTable. Note that this change is not immediate. What is actually
changed is the value of curHandlerTable - which is part of the interpBreak
structure. Rather than explicitly check for changes, each thread will
blindly refresh rIBASE at backward branches, exception throws and returns.

View file

@ -0,0 +1,12 @@
/*
* Inter-instruction transfer stub. Call out to MterpCheckBefore to handle
* any interesting requests and then jump to the real instruction
* handler. Note that the call to MterpCheckBefore is done as a tail call.
*/
.extern MterpCheckBefore
ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET] @ refresh IBASE.
@ lr is preloaded with the primary handler's address so MterpCheckBefore
@ can "return" straight into the real handler (tail-call convention).
adrl lr, artMterpAsmInstructionStart + (${opnum} * 128) @ Addr of primary handler.
mov r0, rSELF
add r1, rFP, #OFF_FP_SHADOWFRAME
mov r2, rPC
b MterpCheckBefore @ (self, shadow_frame, dex_pc_ptr) @ Tail call.

View file

@ -0,0 +1,19 @@
/*
* Generic two-operand compare-and-branch operation. Provide a "condition"
* fragment that specifies the comparison to perform.
*
* For: if-eq, if-ne, if-lt, if-ge, if-gt, if-le
*/
/* if-cmp vA, vB, +CCCC */
mov r1, rINST, lsr #12 @ r1<- B
ubfx r0, rINST, #8, #4 @ r0<- A
GET_VREG r3, r1 @ r3<- vB
GET_VREG r0, r0 @ r0<- vA
FETCH_S rINST, 1 @ rINST<- branch offset, in code units
cmp r0, r3 @ compare (vA, vB)
b${condition} MterpCommonTakenBranchNoFlags
@ Not-taken path: if we are in OSR-check mode, give the JIT a chance.
cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
beq .L_check_not_taken_osr
FETCH_ADVANCE_INST 2
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction

View file

@ -0,0 +1,35 @@
%default {"preinstr":"", "result":"r0", "chkzero":"0"}
/*
* Generic 32-bit binary operation. Provide an "instr" line that
* specifies an instruction that performs "result = r0 op r1".
* This could be an ARM instruction or a function call. (If the result
* comes back in a register other than r0, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (r1). Useful for integer division and modulus. Note that we
* *don't* check for (INT_MIN / -1) here, because the ARM math lib
* handles it correctly.
*
* For: add-int, sub-int, mul-int, div-int, rem-int, and-int, or-int,
* xor-int, shl-int, shr-int, ushr-int, add-float, sub-float,
* mul-float, div-float, rem-float
*/
/* binop vAA, vBB, vCC */
FETCH r0, 1 @ r0<- CCBB
mov r9, rINST, lsr #8 @ r9<- AA
mov r3, r0, lsr #8 @ r3<- CC
and r2, r0, #255 @ r2<- BB
GET_VREG r1, r3 @ r1<- vCC
GET_VREG r0, r2 @ r0<- vBB
@ Optional divide-by-zero guard, emitted only when "chkzero" is 1.
.if $chkzero
cmp r1, #0 @ is second operand zero?
beq common_errDivideByZero
.endif
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
$preinstr @ optional op; may set condition codes
$instr @ $result<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
SET_VREG $result, r9 @ vAA<- $result
GOTO_OPCODE ip @ jump to next instruction
/* 11-14 instructions */

View file

@ -0,0 +1,32 @@
%default {"preinstr":"", "result":"r0", "chkzero":"0"}
/*
* Generic 32-bit "/2addr" binary operation. Provide an "instr" line
* that specifies an instruction that performs "result = r0 op r1".
* This could be an ARM instruction or a function call. (If the result
* comes back in a register other than r0, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (r1). Useful for integer division and modulus.
*
* For: add-int/2addr, sub-int/2addr, mul-int/2addr, div-int/2addr,
* rem-int/2addr, and-int/2addr, or-int/2addr, xor-int/2addr,
* shl-int/2addr, shr-int/2addr, ushr-int/2addr, add-float/2addr,
* sub-float/2addr, mul-float/2addr, div-float/2addr, rem-float/2addr
*/
/* binop/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r1, r3 @ r1<- vB
GET_VREG r0, r9 @ r0<- vA
@ Optional divide-by-zero guard, emitted only when "chkzero" is 1.
.if $chkzero
cmp r1, #0 @ is second operand zero?
beq common_errDivideByZero
.endif
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
$preinstr @ optional op; may set condition codes
$instr @ $result<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
SET_VREG $result, r9 @ vAA<- $result
GOTO_OPCODE ip @ jump to next instruction
/* 10-13 instructions */

View file

@ -0,0 +1,29 @@
%default {"result":"r0", "chkzero":"0"}
/*
* Generic 32-bit "lit16" binary operation. Provide an "instr" line
* that specifies an instruction that performs "result = r0 op r1".
* This could be an ARM instruction or a function call. (If the result
* comes back in a register other than r0, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (r1). Useful for integer division and modulus.
*
* For: add-int/lit16, rsub-int, mul-int/lit16, div-int/lit16,
* rem-int/lit16, and-int/lit16, or-int/lit16, xor-int/lit16
*/
/* binop/lit16 vA, vB, #+CCCC */
FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
mov r2, rINST, lsr #12 @ r2<- B
ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r0, r2 @ r0<- vB
@ Optional divide-by-zero guard, emitted only when "chkzero" is 1.
.if $chkzero
cmp r1, #0 @ is second operand zero?
beq common_errDivideByZero
.endif
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
$instr @ $result<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
SET_VREG $result, r9 @ vAA<- $result
GOTO_OPCODE ip @ jump to next instruction
/* 10-13 instructions */

View file

@ -0,0 +1,35 @@
%default {"extract":"asr r1, r3, #8", "result":"r0", "chkzero":"0"}
/*
* Generic 32-bit "lit8" binary operation. Provide an "instr" line
* that specifies an instruction that performs "result = r0 op r1".
* This could be an ARM instruction or a function call. (If the result
* comes back in a register other than r0, you can override "result".)
*
* You can override "extract" if the extraction of the literal value
* from r3 to r1 is not the default "asr r1, r3, #8". The extraction
* can be omitted completely if the shift is embedded in "instr".
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (r1). Useful for integer division and modulus.
*
* For: add-int/lit8, rsub-int/lit8, mul-int/lit8, div-int/lit8,
* rem-int/lit8, and-int/lit8, or-int/lit8, xor-int/lit8,
* shl-int/lit8, shr-int/lit8, ushr-int/lit8
*/
/* binop/lit8 vAA, vBB, #+CC */
FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC)
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r3, #255 @ r2<- BB
GET_VREG r0, r2 @ r0<- vBB
$extract @ optional; typically r1<- ssssssCC (sign extended)
.if $chkzero
@ FIX: this compare had been commented out, leaving the "beq" below to
@ branch on stale flags; it must set the flags for the zero check
@ (matches binop.S / binop2addr.S / binopLit16.S).
cmp r1, #0 @ is second operand zero?
beq common_errDivideByZero
.endif
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
$instr @ $result<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
SET_VREG $result, r9 @ vAA<- $result
GOTO_OPCODE ip @ jump to next instruction
/* 10-12 instructions */

View file

@ -0,0 +1,38 @@
%default {"preinstr":"", "result0":"r0", "result1":"r1", "chkzero":"0"}
/*
* Generic 64-bit binary operation. Provide an "instr" line that
* specifies an instruction that performs "result = r0-r1 op r2-r3".
* This could be an ARM instruction or a function call. (If the result
* comes back in a register other than r0, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (r1). Useful for integer division and modulus.
*
* for: add-long, sub-long, div-long, rem-long, and-long, or-long,
* xor-long, add-double, sub-double, mul-double, div-double,
* rem-double
*
* IMPORTANT: you may specify "chkzero" or "preinstr" but not both.
*/
/* binop vAA, vBB, vCC */
FETCH r0, 1 @ r0<- CCBB
mov rINST, rINST, lsr #8 @ rINST<- AA
and r2, r0, #255 @ r2<- BB
mov r3, r0, lsr #8 @ r3<- CC
VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[AA]
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
@ 64-bit zero test: OR both halves of the second operand.
.if $chkzero
orrs ip, r2, r3 @ second arg (r2-r3) is zero?
beq common_errDivideByZero
.endif
CLEAR_SHADOW_PAIR rINST, lr, ip @ Zero out the shadow regs
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
$preinstr @ optional op; may set condition codes
$instr @ result<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r9, {$result0,$result1} @ vAA/vAA+1<- $result0/$result1
GOTO_OPCODE ip @ jump to next instruction
/* 14-17 instructions */

View file

@ -0,0 +1,34 @@
%default {"preinstr":"", "result0":"r0", "result1":"r1", "chkzero":"0"}
/*
* Generic 64-bit "/2addr" binary operation. Provide an "instr" line
* that specifies an instruction that performs "result = r0-r1 op r2-r3".
* This could be an ARM instruction or a function call. (If the result
* comes back in a register other than r0, you can override "result".)
*
* If "chkzero" is set to 1, we perform a divide-by-zero check on
* vCC (r1). Useful for integer division and modulus.
*
* For: add-long/2addr, sub-long/2addr, div-long/2addr, rem-long/2addr,
* and-long/2addr, or-long/2addr, xor-long/2addr, add-double/2addr,
* sub-double/2addr, mul-double/2addr, div-double/2addr,
* rem-double/2addr
*/
/* binop/2addr vA, vB */
mov r1, rINST, lsr #12 @ r1<- B
ubfx rINST, rINST, #8, #4 @ rINST<- A
VREG_INDEX_TO_ADDR r1, r1 @ r1<- &fp[B]
VREG_INDEX_TO_ADDR r9, rINST @ r9<- &fp[A]
ldmia r1, {r2-r3} @ r2/r3<- vBB/vBB+1
ldmia r9, {r0-r1} @ r0/r1<- vAA/vAA+1
@ 64-bit zero test: OR both halves of the second operand.
.if $chkzero
orrs ip, r2, r3 @ second arg (r2-r3) is zero?
beq common_errDivideByZero
.endif
CLEAR_SHADOW_PAIR rINST, ip, lr @ Zero shadow regs
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
$preinstr @ optional op; may set condition codes
$instr @ result<- op, r0-r3 changed
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r9, {$result0,$result1} @ vAA/vAA+1<- $result0/$result1
GOTO_OPCODE ip @ jump to next instruction
/* 12-15 instructions */

View file

@ -0,0 +1,77 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Interpreter entry point.
*/
.text
.align 2
.global ExecuteMterpImpl
.type ExecuteMterpImpl, %function
/*
* On entry:
* r0 Thread* self
* r1 code_item
* r2 ShadowFrame
* r3 JValue* result_register
*
*/
ENTRY ExecuteMterpImpl
stmfd sp!, {r3-r10,fp,lr} @ save 10 regs, (r3 just to align 64)
.cfi_adjust_cfa_offset 40
.cfi_rel_offset r3, 0
.cfi_rel_offset r4, 4
.cfi_rel_offset r5, 8
.cfi_rel_offset r6, 12
.cfi_rel_offset r7, 16
.cfi_rel_offset r8, 20
.cfi_rel_offset r9, 24
.cfi_rel_offset r10, 28
.cfi_rel_offset fp, 32
.cfi_rel_offset lr, 36
/* Remember the return register */
str r3, [r2, #SHADOWFRAME_RESULT_REGISTER_OFFSET]
/* Remember the code_item */
str r1, [r2, #SHADOWFRAME_CODE_ITEM_OFFSET]
/* set up "named" registers */
mov rSELF, r0
ldr r0, [r2, #SHADOWFRAME_NUMBER_OF_VREGS_OFFSET]
add rFP, r2, #SHADOWFRAME_VREGS_OFFSET @ point to vregs.
VREG_INDEX_TO_ADDR rREFS, r0 @ point to reference array in shadow frame
ldr r0, [r2, #SHADOWFRAME_DEX_PC_OFFSET] @ Get starting dex_pc.
add rPC, r1, #CODEITEM_INSNS_OFFSET @ Point to base of insns[]
add rPC, rPC, r0, lsl #1 @ Create direct pointer to 1st dex opcode
EXPORT_PC
/* Starting ibase */
ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
/* Set up for backwards branches & osr profiling */
ldr r0, [rFP, #OFF_FP_METHOD]
add r1, rFP, #OFF_FP_SHADOWFRAME
bl MterpSetUpHotnessCountdown
mov rPROFILE, r0 @ Starting hotness countdown to rPROFILE
/* start executing the instruction at rPC */
FETCH_INST @ load rINST from rPC
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction
/* NOTE: no fallthrough */

View file

@ -0,0 +1,3 @@
/* Transfer stub to alternate interpreter */
@ Unconditional tail branch; MterpFallback (footer.S) bails out to the
@ reference interpreter with a "retry" result.
b MterpFallback

View file

@ -0,0 +1,23 @@
/*
* Generic 32-bit floating-point operation. Provide an "instr" line that
* specifies an instruction that performs "s2 = s0 op s1". Because we
* use the "softfp" ABI, this must be an instruction, not a function call.
*
* For: add-float, sub-float, mul-float, div-float
*/
/* floatop vAA, vBB, vCC */
FETCH r0, 1 @ r0<- CCBB
mov r9, rINST, lsr #8 @ r9<- AA
mov r3, r0, lsr #8 @ r3<- CC
and r2, r0, #255 @ r2<- BB
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
flds s1, [r3] @ s1<- vCC
flds s0, [r2] @ s0<- vBB
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
$instr @ s2<- op
GET_INST_OPCODE ip @ extract opcode from rINST
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
fsts s2, [r9] @ vAA<- s2
GOTO_OPCODE ip @ jump to next instruction

View file

@ -0,0 +1,19 @@
/*
* Generic 32-bit floating point "/2addr" binary operation. Provide
* an "instr" line that specifies an instruction that performs
* "s2 = s0 op s1".
*
* For: add-float/2addr, sub-float/2addr, mul-float/2addr, div-float/2addr
*/
/* binop/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
flds s1, [r3] @ s1<- vB
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
flds s0, [r9] @ s0<- vA
$instr @ s2<- op
GET_INST_OPCODE ip @ extract opcode from rINST
@ r9 still holds &vA; the result overwrites the first source register.
fsts s2, [r9] @ vAA<- s2
GOTO_OPCODE ip @ jump to next instruction

View file

@ -0,0 +1,23 @@
/*
* Generic 64-bit double-precision floating point binary operation.
* Provide an "instr" line that specifies an instruction that performs
* "d2 = d0 op d1".
*
* for: add-double, sub-double, mul-double, div-double
*/
/* doubleop vAA, vBB, vCC */
FETCH r0, 1 @ r0<- CCBB
mov r9, rINST, lsr #8 @ r9<- AA
mov r3, r0, lsr #8 @ r3<- CC
and r2, r0, #255 @ r2<- BB
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
fldd d1, [r3] @ d1<- vCC
fldd d0, [r2] @ d0<- vBB
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
$instr @ s2<- op
CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
GET_INST_OPCODE ip @ extract opcode from rINST
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vAA
fstd d2, [r9] @ vAA<- d2
GOTO_OPCODE ip @ jump to next instruction

View file

@ -0,0 +1,21 @@
/*
* Generic 64-bit floating point "/2addr" binary operation. Provide
* an "instr" line that specifies an instruction that performs
* "d2 = d0 op d1".
*
* For: add-double/2addr, sub-double/2addr, mul-double/2addr,
* div-double/2addr
*/
/* binop/2addr vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
ubfx r9, rINST, #8, #4 @ r9<- A
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
CLEAR_SHADOW_PAIR r9, ip, r0 @ Zero out shadow regs
fldd d1, [r3] @ d1<- vB
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
fldd d0, [r9] @ d0<- vA
$instr @ d2<- op
GET_INST_OPCODE ip @ extract opcode from rINST
@ r9 still holds &vA; the result overwrites the first source register.
fstd d2, [r9] @ vAA<- d2
GOTO_OPCODE ip @ jump to next instruction

View file

@ -0,0 +1,298 @@
/*
* ===========================================================================
* Common subroutines and data
* ===========================================================================
*/
.text
.align 2
/*
* We've detected a condition that will result in an exception, but the exception
* has not yet been thrown. Just bail out to the reference interpreter to deal with it.
* TUNING: for consistency, we may want to just go ahead and handle these here.
*/
common_errDivideByZero:
EXPORT_PC
#if MTERP_LOGGING
mov r0, rSELF
add r1, rFP, #OFF_FP_SHADOWFRAME
bl MterpLogDivideByZeroException
#endif
b MterpCommonFallback
common_errArrayIndex:
EXPORT_PC
#if MTERP_LOGGING
mov r0, rSELF
add r1, rFP, #OFF_FP_SHADOWFRAME
bl MterpLogArrayIndexException
#endif
b MterpCommonFallback
common_errNegativeArraySize:
EXPORT_PC
#if MTERP_LOGGING
mov r0, rSELF
add r1, rFP, #OFF_FP_SHADOWFRAME
bl MterpLogNegativeArraySizeException
#endif
b MterpCommonFallback
common_errNoSuchMethod:
EXPORT_PC
#if MTERP_LOGGING
mov r0, rSELF
add r1, rFP, #OFF_FP_SHADOWFRAME
bl MterpLogNoSuchMethodException
#endif
b MterpCommonFallback
common_errNullObject:
EXPORT_PC
#if MTERP_LOGGING
mov r0, rSELF
add r1, rFP, #OFF_FP_SHADOWFRAME
bl MterpLogNullObjectException
#endif
b MterpCommonFallback
common_exceptionThrown:
EXPORT_PC
#if MTERP_LOGGING
mov r0, rSELF
add r1, rFP, #OFF_FP_SHADOWFRAME
bl MterpLogExceptionThrownException
#endif
b MterpCommonFallback
MterpSuspendFallback:
EXPORT_PC
#if MTERP_LOGGING
mov r0, rSELF
add r1, rFP, #OFF_FP_SHADOWFRAME
ldr r2, [rSELF, #THREAD_FLAGS_OFFSET]
bl MterpLogSuspendFallback
#endif
b MterpCommonFallback
/*
* If we're here, something is out of the ordinary. If there is a pending
* exception, handle it. Otherwise, roll back and retry with the reference
* interpreter.
*/
MterpPossibleException:
ldr r0, [rSELF, #THREAD_EXCEPTION_OFFSET]
cmp r0, #0 @ Exception pending?
beq MterpFallback @ If not, fall back to reference interpreter.
/* intentional fallthrough - handle pending exception. */
/*
* On return from a runtime helper routine, we've found a pending exception.
* Can we handle it here - or need to bail out to caller?
*
*/
MterpException:
mov r0, rSELF
add r1, rFP, #OFF_FP_SHADOWFRAME
bl MterpHandleException @ (self, shadow_frame)
cmp r0, #0
beq MterpExceptionReturn @ no local catch, back to caller.
@ A local catch handler exists: rebuild rPC from the updated dex_pc.
ldr r0, [rFP, #OFF_FP_CODE_ITEM]
ldr r1, [rFP, #OFF_FP_DEX_PC]
ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
add rPC, r0, #CODEITEM_INSNS_OFFSET
add rPC, rPC, r1, lsl #1 @ generate new dex_pc_ptr
/* Do we need to switch interpreters? */
bl MterpShouldSwitchInterpreters
cmp r0, #0
bne MterpFallback
/* resume execution at catch block */
EXPORT_PC
FETCH_INST
GET_INST_OPCODE ip
GOTO_OPCODE ip
/* NOTE: no fallthrough */
/*
* Common handling for branches with support for Jit profiling.
* On entry:
* rINST <= signed offset
* rPROFILE <= signed hotness countdown (expanded to 32 bits)
* condition bits <= set to establish sign of offset (use "NoFlags" entry if not)
*
* We have quite a few different cases for branch profiling, OSR detection and
* suspend check support here.
*
* Taken backward branches:
* If profiling active, do hotness countdown and report if we hit zero.
* If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
* Is there a pending suspend request? If so, suspend.
*
* Taken forward branches and not-taken backward branches:
* If in osr check mode, see if our target is a compiled loop header entry and do OSR if so.
*
* Our most common case is expected to be a taken backward branch with active jit profiling,
* but no full OSR check and no pending suspend request.
* Next most common case is not-taken branch with no full OSR check.
*
*/
MterpCommonTakenBranchNoFlags:
cmp rINST, #0
MterpCommonTakenBranch:
bgt .L_forward_branch @ don't add forward branches to hotness
/*
* We need to subtract 1 from positive values and we should not see 0 here,
* so we may use the result of the comparison with -1.
*/
#if JIT_CHECK_OSR != -1
# error "JIT_CHECK_OSR must be -1."
#endif
cmp rPROFILE, #JIT_CHECK_OSR
beq .L_osr_check
subgts rPROFILE, #1
beq .L_add_batch @ counted down to zero - report
.L_resume_backward_branch:
ldr lr, [rSELF, #THREAD_FLAGS_OFFSET]
REFRESH_IBASE
add r2, rINST, rINST @ r2<- byte offset
FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
ands lr, #THREAD_SUSPEND_OR_CHECKPOINT_REQUEST
bne .L_suspend_request_pending
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction
.L_suspend_request_pending:
EXPORT_PC
mov r0, rSELF
bl MterpSuspendCheck @ (self)
cmp r0, #0
bne MterpFallback
REFRESH_IBASE @ might have changed during suspend
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction
@ Reached after reporting a hotness batch: re-check for an OSR request,
@ otherwise resume the backward branch without further counting.
.L_no_count_backwards:
cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
bne .L_resume_backward_branch
.L_osr_check:
mov r0, rSELF
add r1, rFP, #OFF_FP_SHADOWFRAME
mov r2, rINST
EXPORT_PC
bl MterpMaybeDoOnStackReplacement @ (self, shadow_frame, offset)
cmp r0, #0
bne MterpOnStackReplacement
b .L_resume_backward_branch
.L_forward_branch:
cmp rPROFILE, #JIT_CHECK_OSR @ possible OSR re-entry?
beq .L_check_osr_forward
.L_resume_forward_branch:
add r2, rINST, rINST @ r2<- byte offset
FETCH_ADVANCE_INST_RB r2 @ update rPC, load rINST
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction
.L_check_osr_forward:
mov r0, rSELF
add r1, rFP, #OFF_FP_SHADOWFRAME
mov r2, rINST
EXPORT_PC
bl MterpMaybeDoOnStackReplacement @ (self, shadow_frame, offset)
cmp r0, #0
bne MterpOnStackReplacement
b .L_resume_forward_branch
.L_add_batch:
add r1, rFP, #OFF_FP_SHADOWFRAME
strh rPROFILE, [r1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
ldr r0, [rFP, #OFF_FP_METHOD]
mov r2, rSELF
bl MterpAddHotnessBatch @ (method, shadow_frame, self)
mov rPROFILE, r0 @ restore new hotness countdown to rPROFILE
b .L_no_count_backwards
/*
* Entered from the conditional branch handlers when OSR check request active on
* not-taken path. All Dalvik not-taken conditional branch offsets are 2.
*/
.L_check_not_taken_osr:
mov r0, rSELF
add r1, rFP, #OFF_FP_SHADOWFRAME
mov r2, #2
EXPORT_PC
bl MterpMaybeDoOnStackReplacement @ (self, shadow_frame, offset)
cmp r0, #0
bne MterpOnStackReplacement
FETCH_ADVANCE_INST 2
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction
/*
* On-stack replacement has happened, and now we've returned from the compiled method.
*/
MterpOnStackReplacement:
#if MTERP_LOGGING
mov r0, rSELF
add r1, rFP, #OFF_FP_SHADOWFRAME
mov r2, rINST
bl MterpLogOSR
#endif
mov r0, #1 @ Signal normal return
b MterpDone
/*
* Bail out to reference interpreter.
*/
MterpFallback:
EXPORT_PC
#if MTERP_LOGGING
mov r0, rSELF
add r1, rFP, #OFF_FP_SHADOWFRAME
bl MterpLogFallback
#endif
MterpCommonFallback:
mov r0, #0 @ signal retry with reference interpreter.
b MterpDone
/*
* We pushed some registers on the stack in ExecuteMterpImpl, then saved
* SP and LR. Here we restore SP, restore the registers, and then restore
* LR to PC.
*
* On entry:
* uint32_t* rFP (should still be live, pointer to base of vregs)
*/
MterpExceptionReturn:
mov r0, #1 @ signal return to caller.
b MterpDone
@ Store the two result words (r0/r1) through the shadow frame's saved
@ result-register pointer, then signal a normal return.
MterpReturn:
ldr r2, [rFP, #OFF_FP_RESULT_REGISTER]
str r0, [r2]
str r1, [r2, #4]
mov r0, #1 @ signal return to caller.
MterpDone:
/*
* At this point, we expect rPROFILE to be non-zero. If negative, hotness is disabled or we're
* checking for OSR. If greater than zero, we might have unreported hotness to register
* (the difference between the ending rPROFILE and the cached hotness counter). rPROFILE
* should only reach zero immediately after a hotness decrement, and is then reset to either
* a negative special state or the new non-zero countdown value.
*/
cmp rPROFILE, #0
bgt MterpProfileActive @ if > 0, we may have some counts to report.
ldmfd sp!, {r3-r10,fp,pc} @ restore 10 regs and return
MterpProfileActive:
mov rINST, r0 @ stash return value
/* Report cached hotness counts */
ldr r0, [rFP, #OFF_FP_METHOD]
add r1, rFP, #OFF_FP_SHADOWFRAME
mov r2, rSELF
strh rPROFILE, [r1, #SHADOWFRAME_HOTNESS_COUNTDOWN_OFFSET]
bl MterpAddHotnessBatch @ (method, shadow_frame, self)
mov r0, rINST @ restore return value
ldmfd sp!, {r3-r10,fp,pc} @ restore 10 regs and return
END ExecuteMterpImpl

View file

@ -0,0 +1,17 @@
/*
 * Generic 32-bit unary floating-point operation. Provide an "instr"
 * line that specifies an instruction that performs "s1 = op s0".
 *
 * for: int-to-float, float-to-int
 */
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
flds s0, [r3] @ s0<- vB
ubfx r9, rINST, #8, #4 @ r9<- A
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
$instr @ s1<- op
GET_INST_OPCODE ip @ extract opcode from rINST
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
@ Result is stored straight into fp[A]; the rREFS shadow slot is not
@ touched here (this template is only used for non-reference results).
fsts s1, [r9] @ vA<- s1
GOTO_OPCODE ip @ jump to next instruction

View file

@ -0,0 +1,17 @@
/*
 * Generic 64bit-to-32bit unary floating point operation. Provide an
 * "instr" line that specifies an instruction that performs "s0 = op d0".
 *
 * For: double-to-int, double-to-float
 */
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
fldd d0, [r3] @ d0<- vB (64-bit source pair vB/vB+1)
ubfx r9, rINST, #8, #4 @ r9<- A
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
$instr @ s0<- op
GET_INST_OPCODE ip @ extract opcode from rINST
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
@ 32-bit result stored directly; rREFS shadow slot left untouched.
fsts s0, [r9] @ vA<- s0
GOTO_OPCODE ip @ jump to next instruction

View file

@ -0,0 +1,18 @@
/*
 * Generic 32bit-to-64bit floating point unary operation. Provide an
 * "instr" line that specifies an instruction that performs "d0 = op s0".
 *
 * For: int-to-double, float-to-double
 */
/* unop vA, vB */
mov r3, rINST, lsr #12 @ r3<- B
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vB
flds s0, [r3] @ s0<- vB
ubfx r9, rINST, #8, #4 @ r9<- A
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
$instr @ d0<- op
@ Wide (64-bit) destination: zero both shadow slots for the vA/vA+1 pair.
CLEAR_SHADOW_PAIR r9, ip, lr @ Zero shadow regs
GET_INST_OPCODE ip @ extract opcode from rINST
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &vA
fstd d0, [r9] @ vA<- d0
GOTO_OPCODE ip @ jump to next instruction

View file

@ -0,0 +1,310 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
Art assembly interpreter notes:
First validate assembly code by implementing ExecuteXXXImpl() style body (doesn't
handle invoke; allows higher-level code to create the frame & shadow frame).
Once that's working, support direct entry code & eliminate the shadow frame (and
excess locals allocation).
Some (hopefully) temporary ugliness. We'll treat rFP as pointing to the
base of the vreg array within the shadow frame. Access the other fields,
dex_pc_, method_ and number_of_vregs_ via negative offsets. For now, we'll continue
the shadow frame mechanism of double-storing object references - via rFP &
number_of_vregs_.
*/
/*
ARM EABI general notes:
r0-r3 hold first 4 args to a method; they are not preserved across method calls
r4-r8 are available for general use
r9 is given special treatment in some situations, but not for us
r10 (sl) seems to be generally available
r11 (fp) is used by gcc (unless -fomit-frame-pointer is set)
r12 (ip) is scratch -- not preserved across method calls
r13 (sp) should be managed carefully in case a signal arrives
r14 (lr) must be preserved
r15 (pc) can be tinkered with directly
r0 holds returns of <= 4 bytes
r0-r1 hold returns of 8 bytes, low word in r0
Callee must save/restore r4+ (except r12) if it modifies them. If VFP
is present, registers s16-s31 (a/k/a d8-d15, a/k/a q4-q7) must be preserved,
s0-s15 (d0-d7, q0-q3) do not need to be.
Stack is "full descending". Only the arguments that don't fit in the first 4
registers are placed on the stack. "sp" points at the first stacked argument
(i.e. the 5th arg).
VFP: single-precision results in s0, double-precision results in d0.
In the EABI, "sp" must be 64-bit aligned on entry to a function, and any
64-bit quantities (long long, double) must be 64-bit aligned.
*/
/*
Mterp and ARM notes:
The following registers have fixed assignments:
reg nick purpose
r4 rPC interpreted program counter, used for fetching instructions
r5 rFP interpreted frame pointer, used for accessing locals and args
r6 rSELF self (Thread) pointer
r7 rINST first 16-bit code unit of current instruction
r8 rIBASE interpreted instruction base pointer, used for computed goto
r10 rPROFILE branch profiling countdown
r11 rREFS base of object references in shadow frame (ideally, we'll get rid of this later).
Macros are provided for common operations. Each macro MUST emit only
one instruction to make instruction-counting easier. They MUST NOT alter
unspecified registers or condition codes.
*/
/*
 * This is a #include, not a %include, because we want the C pre-processor
 * to expand the macros into assembler assignment statements.
 */
#include "asm_support.h"
#define MTERP_PROFILE_BRANCHES 1 /* enable branch-profiling code paths */
#define MTERP_LOGGING 0 /* 0: compile the MterpLog* helper calls out */
/* During bringup, we'll use the shadow frame model instead of rFP */
/* single-purpose registers, given names for clarity */
#define rPC r4
#define rFP r5
#define rSELF r6
#define rINST r7
#define rIBASE r8
#define rPROFILE r10
#define rREFS r11
/*
 * Instead of holding a pointer to the shadow frame, we keep rFP at the base of the vregs. So,
 * to access other shadow frame fields, we need to use a backwards offset. Define those here.
 */
/* Offsets below are negative: subtract the vregs offset from a field's offset. */
#define OFF_FP(a) (a - SHADOWFRAME_VREGS_OFFSET)
#define OFF_FP_NUMBER_OF_VREGS OFF_FP(SHADOWFRAME_NUMBER_OF_VREGS_OFFSET)
#define OFF_FP_DEX_PC OFF_FP(SHADOWFRAME_DEX_PC_OFFSET)
#define OFF_FP_LINK OFF_FP(SHADOWFRAME_LINK_OFFSET)
#define OFF_FP_METHOD OFF_FP(SHADOWFRAME_METHOD_OFFSET)
#define OFF_FP_RESULT_REGISTER OFF_FP(SHADOWFRAME_RESULT_REGISTER_OFFSET)
#define OFF_FP_DEX_PC_PTR OFF_FP(SHADOWFRAME_DEX_PC_PTR_OFFSET)
#define OFF_FP_CODE_ITEM OFF_FP(SHADOWFRAME_CODE_ITEM_OFFSET)
#define OFF_FP_SHADOWFRAME OFF_FP(0)
/*
 * "export" the PC to dex_pc field in the shadow frame, f/b/o future exception objects. Must
 * be done *before* something throws.
 *
 * It's okay to do this more than once.
 *
 * NOTE: the fast interpreter keeps track of dex pc as a direct pointer to the mapped
 * dex byte codes. However, the rest of the runtime expects dex pc to be an instruction
 * offset into the code_items_[] array. For efficiency, we will "export" the
 * current dex pc as a direct pointer using the EXPORT_PC macro, and rely on GetDexPC
 * to convert to a dex pc when needed.
 */
.macro EXPORT_PC
str rPC, [rFP, #OFF_FP_DEX_PC_PTR]
.endm
/*
 * Export both the raw PC pointer and the computed 16-bit-code-unit dex pc:
 * dex_pc = (rPC - code_item->insns_) >> 1. Clobbers \tmp.
 */
.macro EXPORT_DEX_PC tmp
ldr \tmp, [rFP, #OFF_FP_CODE_ITEM]
str rPC, [rFP, #OFF_FP_DEX_PC_PTR]
add \tmp, #CODEITEM_INSNS_OFFSET
sub \tmp, rPC, \tmp
asr \tmp, #1
str \tmp, [rFP, #OFF_FP_DEX_PC]
.endm
/*
 * Fetch the next instruction from rPC into rINST. Does not advance rPC.
 */
.macro FETCH_INST
ldrh rINST, [rPC]
.endm
/*
 * Fetch the next instruction from the specified offset. Advances rPC
 * to point to the next instruction. "_count" is in 16-bit code units.
 *
 * Because of the limited size of immediate constants on ARM, this is only
 * suitable for small forward movements (i.e. don't try to implement "goto"
 * with this).
 *
 * This must come AFTER anything that can throw an exception, or the
 * exception catch may miss. (This also implies that it must come after
 * EXPORT_PC.)
 */
.macro FETCH_ADVANCE_INST count
@ Pre-indexed with writeback: updates rPC and loads rINST in one instruction.
ldrh rINST, [rPC, #((\count)*2)]!
.endm
/*
 * The operation performed here is similar to FETCH_ADVANCE_INST, except the
 * src and dest registers are parameterized (not hard-wired to rPC and rINST).
 */
.macro PREFETCH_ADVANCE_INST dreg, sreg, count
ldrh \dreg, [\sreg, #((\count)*2)]!
.endm
/*
 * Similar to FETCH_ADVANCE_INST, but does not update rPC. Used to load
 * rINST ahead of possible exception point. Be sure to manually advance rPC
 * later.
 */
.macro PREFETCH_INST count
ldrh rINST, [rPC, #((\count)*2)]
.endm
/* Advance rPC by some number of code units. */
.macro ADVANCE count
add rPC, #((\count)*2)
.endm
/*
 * Fetch the next instruction from an offset specified by _reg. Updates
 * rPC to point to the next instruction. "_reg" must specify the distance
 * in bytes, *not* 16-bit code units, and may be a signed value.
 *
 * We want to write "ldrh rINST, [rPC, _reg, lsl #1]!", but some of the
 * bits that hold the shift distance are used for the half/byte/sign flags.
 * In some cases we can pre-double _reg for free, so we require a byte offset
 * here.
 */
.macro FETCH_ADVANCE_INST_RB reg
ldrh rINST, [rPC, \reg]!
.endm
/*
 * Fetch a half-word code unit from an offset past the current PC. The
 * "_count" value is in 16-bit code units. Does not advance rPC.
 *
 * The "_S" variant works the same but treats the value as signed.
 */
.macro FETCH reg, count
ldrh \reg, [rPC, #((\count)*2)]
.endm
.macro FETCH_S reg, count
ldrsh \reg, [rPC, #((\count)*2)]
.endm
/*
 * Fetch one byte from an offset past the current PC. Pass in the same
 * "_count" as you would for FETCH, and an additional 0/1 indicating which
 * byte of the halfword you want (lo/hi).
 */
.macro FETCH_B reg, count, byte
ldrb \reg, [rPC, #((\count)*2+(\byte))]
.endm
/*
 * Put the instruction's opcode field into the specified register.
 */
.macro GET_INST_OPCODE reg
and \reg, rINST, #255
.endm
/*
 * Put the prefetched instruction's opcode field into the specified register.
 */
.macro GET_PREFETCHED_OPCODE oreg, ireg
and \oreg, \ireg, #255
.endm
/*
 * Begin executing the opcode in _reg. Because this only jumps within the
 * interpreter, we don't have to worry about pre-ARMv5 THUMB interwork.
 */
@ ${handler_size_bits} is substituted by the mterp generator: each handler
@ occupies a fixed 2^handler_size_bits-byte slot, so pc = rIBASE + op * size.
.macro GOTO_OPCODE reg
add pc, rIBASE, \reg, lsl #${handler_size_bits}
.endm
.macro GOTO_OPCODE_BASE base,reg
add pc, \base, \reg, lsl #${handler_size_bits}
.endm
/*
 * Get/set the 32-bit value from a Dalvik register.
 */
.macro GET_VREG reg, vreg
ldr \reg, [rFP, \vreg, lsl #2]
.endm
@ Non-reference store: writes the value, then zeroes the matching rREFS
@ (reference shadow) slot. NOTE: clobbers \reg (leaves it 0).
.macro SET_VREG reg, vreg
str \reg, [rFP, \vreg, lsl #2]
mov \reg, #0
str \reg, [rREFS, \vreg, lsl #2]
.endm
@ Reference store: writes the value to both fp and the rREFS shadow slot.
@ NOTE: the \tmpreg parameter is accepted but unused by this implementation.
.macro SET_VREG_OBJECT reg, vreg, tmpreg
str \reg, [rFP, \vreg, lsl #2]
str \reg, [rREFS, \vreg, lsl #2]
.endm
.macro SET_VREG_SHADOW reg, vreg
str \reg, [rREFS, \vreg, lsl #2]
.endm
/*
 * Clear the corresponding shadow regs for a vreg pair
 */
@ Clobbers \tmp1 (set to 0) and \tmp2 (set to \vreg + 1).
.macro CLEAR_SHADOW_PAIR vreg, tmp1, tmp2
mov \tmp1, #0
add \tmp2, \vreg, #1
SET_VREG_SHADOW \tmp1, \vreg
SET_VREG_SHADOW \tmp1, \tmp2
.endm
/*
 * Convert a virtual register index into an address.
 */
.macro VREG_INDEX_TO_ADDR reg, vreg
add \reg, rFP, \vreg, lsl #2 /* WARNING/FIXME: handle shadow frame vreg zero if store */
.endm
/*
 * Refresh handler table.
 */
@ Reload rIBASE from the thread; needed after anything that may have changed
@ the active handler table (e.g. switching to the instrumented table).
.macro REFRESH_IBASE
ldr rIBASE, [rSELF, #THREAD_CURRENT_IBASE_OFFSET]
.endm
/*
 * cfi support macros.
 */
@ Declares an ARM-mode, cache-aligned, hidden global function entry with
@ both DWARF CFI (.cfi_startproc) and ARM EHABI (.fnstart) unwind info.
.macro ENTRY name
.arm
.type \name, #function
.hidden \name // Hide this as a global symbol, so we do not incur plt calls.
.global \name
/* Cache alignment for function entry */
.balign 16
\name:
.cfi_startproc
.fnstart
.endm
@ Must pair with ENTRY: closes both unwind-info regions and sets the symbol size.
.macro END name
.fnend
.cfi_endproc
.size \name, .-\name
.endm

View file

@ -0,0 +1,22 @@
%default { "helper":"UndefinedInvokeHandler" }
/*
 * Generic invoke handler wrapper.
 *
 * Calls $helper(self, shadow_frame, dex_pc_ptr, inst); a zero return means
 * an exception is pending. All invoke variants are 3 code units long.
 */
/* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
/* op {vCCCC..v(CCCC+AA-1)}, meth@BBBB */
.extern $helper
EXPORT_PC
mov r0, rSELF
add r1, rFP, #OFF_FP_SHADOWFRAME
mov r2, rPC
mov r3, rINST
bl $helper
cmp r0, #0
beq MterpException
FETCH_ADVANCE_INST 3
@ After an invoke the preferred interpreter may have changed; bail to the
@ reference interpreter if so.
bl MterpShouldSwitchInterpreters
cmp r0, #0
bne MterpFallback
GET_INST_OPCODE ip
GOTO_OPCODE ip

View file

@ -0,0 +1 @@
/* add-double vAA, vBB, vCC: 64-bit FP add via the wide binop template. */
%include "arm/fbinopWide.S" {"instr":"faddd d2, d0, d1"}

View file

@ -0,0 +1 @@
/* add-double/2addr vA, vB: 64-bit FP add, in-place form. */
%include "arm/fbinopWide2addr.S" {"instr":"faddd d2, d0, d1"}

View file

@ -0,0 +1 @@
/* add-float vAA, vBB, vCC: 32-bit FP add via the FP binop template. */
%include "arm/fbinop.S" {"instr":"fadds s2, s0, s1"}

View file

@ -0,0 +1 @@
/* add-float/2addr vA, vB: 32-bit FP add, in-place form. */
%include "arm/fbinop2addr.S" {"instr":"fadds s2, s0, s1"}

View file

@ -0,0 +1 @@
/* add-int vAA, vBB, vCC: 32-bit integer add via the binop template. */
%include "arm/binop.S" {"instr":"add r0, r0, r1"}

View file

@ -0,0 +1 @@
/* add-int/2addr vA, vB: 32-bit integer add, in-place form. */
%include "arm/binop2addr.S" {"instr":"add r0, r0, r1"}

View file

@ -0,0 +1 @@
/* add-int/lit16 vA, vB, #+CCCC: add sign-extended 16-bit literal. */
%include "arm/binopLit16.S" {"instr":"add r0, r0, r1"}

View file

@ -0,0 +1 @@
/* add-int/lit8 vAA, vBB, #+CC: literal extracted in-instruction via asr #8. */
%include "arm/binopLit8.S" {"extract":"", "instr":"add r0, r0, r3, asr #8"}

View file

@ -0,0 +1 @@
/* add-long vAA, vBB, vCC: 64-bit add as adds (low) + adc (high with carry). */
%include "arm/binopWide.S" {"preinstr":"adds r0, r0, r2", "instr":"adc r1, r1, r3"}

View file

@ -0,0 +1 @@
/* add-long/2addr vA, vB: 64-bit add (adds/adc), in-place form. */
%include "arm/binopWide2addr.S" {"preinstr":"adds r0, r0, r2", "instr":"adc r1, r1, r3"}

View file

@ -0,0 +1,29 @@
%default { "load":"ldr", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
/*
 * Array get, 32 bits or less. vAA <- vBB[vCC].
 *
 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
 * instructions. We use a pair of FETCH_Bs instead.
 *
 * for: aget, aget-boolean, aget-byte, aget-char, aget-short
 *
 * NOTE: assumes data offset for arrays is the same for all non-wide types.
 * If this changes, specialize.
 */
/* op vAA, vBB, vCC */
FETCH_B r2, 1, 0 @ r2<- BB
mov r9, rINST, lsr #8 @ r9<- AA
FETCH_B r3, 1, 1 @ r3<- CC
GET_VREG r0, r2 @ r0<- vBB (array object)
GET_VREG r1, r3 @ r1<- vCC (requested index)
cmp r0, #0 @ null array object?
beq common_errNullObject @ yes, bail
ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
add r0, r0, r1, lsl #$shift @ r0<- arrayObj + index*width
@ Unsigned compare handles negative indices too (they wrap to large values).
cmp r1, r3 @ compare unsigned index, length
bcs common_errArrayIndex @ index >= length, bail
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
$load r2, [r0, #$data_offset] @ r2<- vBB[vCC]
GET_INST_OPCODE ip @ extract opcode from rINST
@ SET_VREG also zeroes the rREFS shadow slot; object arrays go through
@ op_aget_object instead.
SET_VREG r2, r9 @ vAA<- r2
GOTO_OPCODE ip @ jump to next instruction

View file

@ -0,0 +1 @@
/* aget-boolean: unsigned byte load from a boolean[] element. */
%include "arm/op_aget.S" { "load":"ldrb", "shift":"0", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }

View file

@ -0,0 +1 @@
/* aget-byte: sign-extending byte load from a byte[] element. */
%include "arm/op_aget.S" { "load":"ldrsb", "shift":"0", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }

View file

@ -0,0 +1 @@
/* aget-char: zero-extending halfword load from a char[] element. */
%include "arm/op_aget.S" { "load":"ldrh", "shift":"1", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }

View file

@ -0,0 +1,21 @@
/*
 * Array object get. vAA <- vBB[vCC].
 *
 * Delegates to artAGetObjectFromMterp(array, index), which performs the
 * null/bounds checks and raises any exception; we then test the thread's
 * pending-exception slot.
 *
 * for: aget-object
 */
/* op vAA, vBB, vCC */
FETCH_B r2, 1, 0 @ r2<- BB
mov r9, rINST, lsr #8 @ r9<- AA
FETCH_B r3, 1, 1 @ r3<- CC
EXPORT_PC
GET_VREG r0, r2 @ r0<- vBB (array object)
GET_VREG r1, r3 @ r1<- vCC (requested index)
bl artAGetObjectFromMterp @ (array, index)
ldr r1, [rSELF, #THREAD_EXCEPTION_OFFSET]
PREFETCH_INST 2
cmp r1, #0
bne MterpException
@ Reference result: must also be written to the rREFS shadow slot.
SET_VREG_OBJECT r0, r9
ADVANCE 2
GET_INST_OPCODE ip
GOTO_OPCODE ip @ jump to next instruction

View file

@ -0,0 +1 @@
/* aget-short: sign-extending halfword load from a short[] element. */
%include "arm/op_aget.S" { "load":"ldrsh", "shift":"1", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }

View file

@ -0,0 +1,25 @@
/*
* Array get, 64 bits. vAA <- vBB[vCC].
*
* Arrays of long/double are 64-bit aligned, so it's okay to use LDRD.
*/
/* aget-wide vAA, vBB, vCC */
FETCH r0, 1 @ r0<- CCBB
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r0, #255 @ r2<- BB
mov r3, r0, lsr #8 @ r3<- CC
GET_VREG r0, r2 @ r0<- vBB (array object)
GET_VREG r1, r3 @ r1<- vCC (requested index)
CLEAR_SHADOW_PAIR r9, r2, r3 @ Zero out the shadow regs
cmp r0, #0 @ null array object?
beq common_errNullObject @ yes, bail
ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width
cmp r1, r3 @ compare unsigned index, length
bcs common_errArrayIndex @ index >= length, bail
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
ldrd r2, [r0, #MIRROR_WIDE_ARRAY_DATA_OFFSET] @ r2/r3<- vBB[vCC]
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r9, {r2-r3} @ vAA/vAA+1<- r2/r3
GOTO_OPCODE ip @ jump to next instruction

View file

@ -0,0 +1 @@
/* and-int vAA, vBB, vCC: bitwise AND via the binop template. */
%include "arm/binop.S" {"instr":"and r0, r0, r1"}

View file

@ -0,0 +1 @@
/* and-int/2addr vA, vB: bitwise AND, in-place form. */
%include "arm/binop2addr.S" {"instr":"and r0, r0, r1"}

View file

@ -0,0 +1 @@
/* and-int/lit16 vA, vB, #+CCCC: AND with sign-extended 16-bit literal. */
%include "arm/binopLit16.S" {"instr":"and r0, r0, r1"}

View file

@ -0,0 +1 @@
/* and-int/lit8 vAA, vBB, #+CC: literal extracted in-instruction via asr #8. */
%include "arm/binopLit8.S" {"extract":"", "instr":"and r0, r0, r3, asr #8"}

View file

@ -0,0 +1 @@
/* and-long vAA, vBB, vCC: 64-bit AND as two independent 32-bit ANDs. */
%include "arm/binopWide.S" {"preinstr":"and r0, r0, r2", "instr":"and r1, r1, r3"}

View file

@ -0,0 +1 @@
/* and-long/2addr vA, vB: 64-bit AND, in-place form. */
%include "arm/binopWide2addr.S" {"preinstr":"and r0, r0, r2", "instr":"and r1, r1, r3"}

View file

@ -0,0 +1,29 @@
%default { "store":"str", "shift":"2", "data_offset":"MIRROR_INT_ARRAY_DATA_OFFSET" }
/*
 * Array put, 32 bits or less. vBB[vCC] <- vAA.
 *
 * Note: using the usual FETCH/and/shift stuff, this fits in exactly 17
 * instructions. We use a pair of FETCH_Bs instead.
 *
 * for: aput, aput-boolean, aput-byte, aput-char, aput-short
 *
 * NOTE: this assumes data offset for arrays is the same for all non-wide types.
 * If this changes, specialize.
 */
/* op vAA, vBB, vCC */
FETCH_B r2, 1, 0 @ r2<- BB
mov r9, rINST, lsr #8 @ r9<- AA
FETCH_B r3, 1, 1 @ r3<- CC
GET_VREG r0, r2 @ r0<- vBB (array object)
GET_VREG r1, r3 @ r1<- vCC (requested index)
cmp r0, #0 @ null array object?
beq common_errNullObject @ yes, bail
ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
add r0, r0, r1, lsl #$shift @ r0<- arrayObj + index*width
@ Unsigned compare handles negative indices too (they wrap to large values).
cmp r1, r3 @ compare unsigned index, length
bcs common_errArrayIndex @ index >= length, bail
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
GET_VREG r2, r9 @ r2<- vAA
GET_INST_OPCODE ip @ extract opcode from rINST
@ No write barrier needed: this handler is only for primitive values;
@ object stores go through op_aput_object.
$store r2, [r0, #$data_offset] @ vBB[vCC]<- r2
GOTO_OPCODE ip @ jump to next instruction

View file

@ -0,0 +1 @@
/* aput-boolean: byte store into a boolean[] element. */
%include "arm/op_aput.S" { "store":"strb", "shift":"0", "data_offset":"MIRROR_BOOLEAN_ARRAY_DATA_OFFSET" }

View file

@ -0,0 +1 @@
/* aput-byte: byte store into a byte[] element. */
%include "arm/op_aput.S" { "store":"strb", "shift":"0", "data_offset":"MIRROR_BYTE_ARRAY_DATA_OFFSET" }

View file

@ -0,0 +1 @@
/* aput-char: halfword store into a char[] element. */
%include "arm/op_aput.S" { "store":"strh", "shift":"1", "data_offset":"MIRROR_CHAR_ARRAY_DATA_OFFSET" }

View file

@ -0,0 +1,14 @@
/*
 * Store an object into an array. vBB[vCC] <- vAA.
 *
 * Delegates to MterpAputObject(shadow_frame, dex_pc_ptr, inst), which
 * performs the null/bounds/type checks (and any store barrier); a zero
 * return means an exception may be pending.
 */
/* op vAA, vBB, vCC */
EXPORT_PC
add r0, rFP, #OFF_FP_SHADOWFRAME
mov r1, rPC
mov r2, rINST
bl MterpAputObject
cmp r0, #0
beq MterpPossibleException
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction

View file

@ -0,0 +1 @@
/* aput-short: halfword store into a short[] element. */
%include "arm/op_aput.S" { "store":"strh", "shift":"1", "data_offset":"MIRROR_SHORT_ARRAY_DATA_OFFSET" }

View file

@ -0,0 +1,24 @@
/*
 * Array put, 64 bits. vBB[vCC] <- vAA.
 *
 * Arrays of long/double are 64-bit aligned, so it's okay to use STRD.
 */
/* aput-wide vAA, vBB, vCC */
FETCH r0, 1 @ r0<- CCBB
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r0, #255 @ r2<- BB
mov r3, r0, lsr #8 @ r3<- CC
GET_VREG r0, r2 @ r0<- vBB (array object)
GET_VREG r1, r3 @ r1<- vCC (requested index)
cmp r0, #0 @ null array object?
beq common_errNullObject @ yes, bail
ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- arrayObj->length
add r0, r0, r1, lsl #3 @ r0<- arrayObj + index*width
cmp r1, r3 @ compare unsigned index, length
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
bcs common_errArrayIndex @ index >= length, bail
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
ldmia r9, {r2-r3} @ r2/r3<- vAA/vAA+1
GET_INST_OPCODE ip @ extract opcode from rINST
strd r2, [r0, #MIRROR_WIDE_ARRAY_DATA_OFFSET] @ vBB[vCC]<- r2/r3
GOTO_OPCODE ip @ jump to next instruction

View file

@ -0,0 +1,13 @@
/*
 * Return the length of an array. vA <- vB.length
 */
mov r1, rINST, lsr #12 @ r1<- B
ubfx r2, rINST, #8, #4 @ r2<- A
GET_VREG r0, r1 @ r0<- vB (object ref)
cmp r0, #0 @ is object null?
beq common_errNullObject @ yup, fail
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
ldr r3, [r0, #MIRROR_ARRAY_LENGTH_OFFSET] @ r3<- array length
GET_INST_OPCODE ip @ extract opcode from rINST
SET_VREG r3, r2 @ vA<- length
GOTO_OPCODE ip @ jump to next instruction

View file

@ -0,0 +1,17 @@
/*
 * Check to see if a cast from one class to another is allowed.
 *
 * Delegates to MterpCheckCast(index, &obj, method, self); a non-zero
 * return means a ClassCastException (or resolution error) is pending.
 */
/* check-cast vAA, class@BBBB */
EXPORT_PC
FETCH r0, 1 @ r0<- BBBB
mov r1, rINST, lsr #8 @ r1<- AA
VREG_INDEX_TO_ADDR r1, r1 @ r1<- &object
ldr r2, [rFP, #OFF_FP_METHOD] @ r2<- method
mov r3, rSELF @ r3<- self
bl MterpCheckCast @ (index, &obj, method, self)
PREFETCH_INST 2
cmp r0, #0
bne MterpPossibleException
ADVANCE 2
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction

View file

@ -0,0 +1,23 @@
/*
 * Compare two 64-bit values. Puts 0, 1, or -1 into the destination
 * register based on the results of the comparison.
 *
 * Uses a cmp/sbcs pair to get the signed-LT result without a 64-bit
 * subtract, then re-compares the low words for the EQ/NE decision.
 */
/* cmp-long vAA, vBB, vCC */
FETCH r0, 1 @ r0<- CCBB
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r0, #255 @ r2<- BB
mov r3, r0, lsr #8 @ r3<- CC
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &fp[BB]
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[CC]
ldmia r2, {r0-r1} @ r0/r1<- vBB/vBB+1
ldmia r3, {r2-r3} @ r2/r3<- vCC/vCC+1
cmp r0, r2
sbcs ip, r1, r3 @ Sets correct CCs for checking LT (but not EQ/NE)
mov ip, #0
mvnlt ip, #0 @ -1
cmpeq r0, r2 @ For correct EQ/NE, we may need to repeat the first CMP
orrne ip, #1
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
SET_VREG ip, r9 @ vAA<- ip
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction

View file

@ -0,0 +1,34 @@
/*
 * Compare two floating-point values. Puts 0, 1, or -1 into the
 * destination register based on the results of the comparison.
 *
 * int compare(x, y) {
 * if (x == y) {
 * return 0;
 * } else if (x < y) {
 * return -1;
 * } else if (x > y) {
 * return 1;
 * } else {
 * return 1; // unordered (NaN operand): cmpg biases to 1
 * }
 * }
 */
/* op vAA, vBB, vCC */
FETCH r0, 1 @ r0<- CCBB
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r0, #255 @ r2<- BB
mov r3, r0, lsr #8 @ r3<- CC
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
fldd d0, [r2] @ d0<- vBB
fldd d1, [r3] @ d1<- vCC
vcmpe.f64 d0, d1 @ compare (vBB, vCC)
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
@ Default of 1 also covers the unordered (NaN) case: neither mi nor eq fires.
mov r0, #1 @ r0<- 1 (default)
GET_INST_OPCODE ip @ extract opcode from rINST
fmstat @ export status flags
mvnmi r0, #0 @ (less than) r0<- -1
moveq r0, #0 @ (equal) r0<- 0
SET_VREG r0, r9 @ vAA<- r0
GOTO_OPCODE ip @ jump to next instruction

View file

@ -0,0 +1,34 @@
/*
 * Compare two floating-point values. Puts 0, 1, or -1 into the
 * destination register based on the results of the comparison.
 *
 * int compare(x, y) {
 * if (x == y) {
 * return 0;
 * } else if (x < y) {
 * return -1;
 * } else if (x > y) {
 * return 1;
 * } else {
 * return 1; // unordered (NaN operand): cmpg biases to 1
 * }
 * }
 */
/* op vAA, vBB, vCC */
FETCH r0, 1 @ r0<- CCBB
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r0, #255 @ r2<- BB
mov r3, r0, lsr #8 @ r3<- CC
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
flds s0, [r2] @ s0<- vBB
flds s1, [r3] @ s1<- vCC
vcmpe.f32 s0, s1 @ compare (vBB, vCC)
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
@ Default of 1 also covers the unordered (NaN) case: neither mi nor eq fires.
mov r0, #1 @ r0<- 1 (default)
GET_INST_OPCODE ip @ extract opcode from rINST
fmstat @ export status flags
mvnmi r0, #0 @ (less than) r0<- -1
moveq r0, #0 @ (equal) r0<- 0
SET_VREG r0, r9 @ vAA<- r0
GOTO_OPCODE ip @ jump to next instruction

View file

@ -0,0 +1,34 @@
/*
 * Compare two floating-point values. Puts 0, 1, or -1 into the
 * destination register based on the results of the comparison.
 *
 * int compare(x, y) {
 * if (x == y) {
 * return 0;
 * } else if (x > y) {
 * return 1;
 * } else if (x < y) {
 * return -1;
 * } else {
 * return -1; // unordered (NaN operand): cmpl biases to -1
 * }
 * }
 */
/* op vAA, vBB, vCC */
FETCH r0, 1 @ r0<- CCBB
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r0, #255 @ r2<- BB
mov r3, r0, lsr #8 @ r3<- CC
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
fldd d0, [r2] @ d0<- vBB
fldd d1, [r3] @ d1<- vCC
vcmpe.f64 d0, d1 @ compare (vBB, vCC)
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
@ Default of -1 also covers the unordered (NaN) case: neither gt nor eq fires.
mvn r0, #0 @ r0<- -1 (default)
GET_INST_OPCODE ip @ extract opcode from rINST
fmstat @ export status flags
movgt r0, #1 @ (greater than) r0<- 1
moveq r0, #0 @ (equal) r0<- 0
SET_VREG r0, r9 @ vAA<- r0
GOTO_OPCODE ip @ jump to next instruction

View file

@ -0,0 +1,34 @@
/*
* Compare two floating-point values. Puts 0, 1, or -1 into the
* destination register based on the results of the comparison.
*
* int compare(x, y) {
* if (x == y) {
* return 0;
* } else if (x > y) {
* return 1;
* } else if (x < y) {
* return -1;
* } else {
* return -1;
* }
* }
*/
/* op vAA, vBB, vCC */
FETCH r0, 1 @ r0<- CCBB
mov r9, rINST, lsr #8 @ r9<- AA
and r2, r0, #255 @ r2<- BB
mov r3, r0, lsr #8 @ r3<- CC
VREG_INDEX_TO_ADDR r2, r2 @ r2<- &vBB
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &vCC
flds s0, [r2] @ s0<- vBB
flds s1, [r3] @ s1<- vCC
vcmpe.f32 s0, s1 @ compare (vBB, vCC)
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
mvn r0, #0 @ r0<- -1 (default)
GET_INST_OPCODE ip @ extract opcode from rINST
fmstat @ export status flags
movgt r0, #1 @ (greater than) r1<- 1
moveq r0, #0 @ (equal) r1<- 0
SET_VREG r0, r9 @ vAA<- r0
GOTO_OPCODE ip @ jump to next instruction

View file

@ -0,0 +1,9 @@
/* const vAA, #+BBBBbbbb */
/* Load a 32-bit literal assembled from two 16-bit code units. */
mov r3, rINST, lsr #8 @ r3<- AA
FETCH r0, 1 @ r0<- bbbb (low)
FETCH r1, 2 @ r1<- BBBB (high)
FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb
GET_INST_OPCODE ip @ extract opcode from rINST
SET_VREG r0, r3 @ vAA<- r0
GOTO_OPCODE ip @ jump to next instruction

View file

@ -0,0 +1,7 @@
/* const/16 vAA, #+BBBB */
/* Load a sign-extended 16-bit literal. */
FETCH_S r0, 1 @ r0<- ssssBBBB (sign-extended)
mov r3, rINST, lsr #8 @ r3<- AA
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
SET_VREG r0, r3 @ vAA<- r0
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction

View file

@ -0,0 +1,7 @@
/* const/4 vA, #+B */
/* Load a sign-extended 4-bit literal from the high nibble of the opcode unit. */
sbfx r1, rINST, #12, #4 @ r1<- sssssssB (sign-extended)
ubfx r0, rINST, #8, #4 @ r0<- A
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
GET_INST_OPCODE ip @ ip<- opcode from rINST
SET_VREG r1, r0 @ fp[A]<- r1
GOTO_OPCODE ip @ execute next instruction

View file

@ -0,0 +1,13 @@
/* const/class vAA, Class@BBBB */
/* Resolve and store a class reference via MterpConstClass; may throw. */
EXPORT_PC
FETCH r0, 1 @ r0<- BBBB
mov r1, rINST, lsr #8 @ r1<- AA
add r2, rFP, #OFF_FP_SHADOWFRAME
mov r3, rSELF
bl MterpConstClass @ (index, tgt_reg, shadow_frame, self)
PREFETCH_INST 2
cmp r0, #0
bne MterpPossibleException
ADVANCE 2
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction

View file

@ -0,0 +1,8 @@
/* const/high16 vAA, #+BBBB0000 */
/* Load a 16-bit literal shifted into the high halfword; low halfword is zero. */
FETCH r0, 1 @ r0<- 0000BBBB (zero-extended)
mov r3, rINST, lsr #8 @ r3<- AA
mov r0, r0, lsl #16 @ r0<- BBBB0000
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
SET_VREG r0, r3 @ vAA<- r0
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction

View file

@ -0,0 +1,13 @@
/* const/string vAA, String@BBBB */
/* Resolve and store a string reference via MterpConstString; may throw. */
EXPORT_PC
FETCH r0, 1 @ r0<- BBBB
mov r1, rINST, lsr #8 @ r1<- AA
add r2, rFP, #OFF_FP_SHADOWFRAME
mov r3, rSELF
bl MterpConstString @ (index, tgt_reg, shadow_frame, self)
PREFETCH_INST 2 @ load rINST
cmp r0, #0 @ fail?
bne MterpPossibleException @ let reference interpreter deal with it.
ADVANCE 2 @ advance rPC
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction

View file

@ -0,0 +1,15 @@
/* const/string vAA, String@BBBBBBBB */
/* Jumbo form: 32-bit string index assembled from two 16-bit code units. */
EXPORT_PC
FETCH r0, 1 @ r0<- bbbb (low)
FETCH r2, 2 @ r2<- BBBB (high)
mov r1, rINST, lsr #8 @ r1<- AA
orr r0, r0, r2, lsl #16 @ r0<- BBBBbbbb
add r2, rFP, #OFF_FP_SHADOWFRAME
mov r3, rSELF
bl MterpConstString @ (index, tgt_reg, shadow_frame, self)
PREFETCH_INST 3 @ advance rPC
cmp r0, #0 @ fail?
bne MterpPossibleException @ let reference interpreter deal with it.
ADVANCE 3 @ advance rPC
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction

View file

@ -0,0 +1,14 @@
/* const-wide vAA, #+HHHHhhhhBBBBbbbb */
/* Load a full 64-bit literal from four 16-bit code units. */
FETCH r0, 1 @ r0<- bbbb (low)
FETCH r1, 2 @ r1<- BBBB (low middle)
FETCH r2, 3 @ r2<- hhhh (high middle)
orr r0, r0, r1, lsl #16 @ r0<- BBBBbbbb (low word)
FETCH r3, 4 @ r3<- HHHH (high)
mov r9, rINST, lsr #8 @ r9<- AA
orr r1, r2, r3, lsl #16 @ r1<- HHHHhhhh (high word)
CLEAR_SHADOW_PAIR r9, r2, r3 @ Zero out the shadow regs
FETCH_ADVANCE_INST 5 @ advance rPC, load rINST
VREG_INDEX_TO_ADDR r9, r9 @ r9<- &fp[AA]
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r9, {r0-r1} @ vAA<- r0/r1
GOTO_OPCODE ip @ jump to next instruction

View file

@ -0,0 +1,10 @@
/* const-wide/16 vAA, #+BBBB */
/* Load a 16-bit literal sign-extended to 64 bits. */
FETCH_S r0, 1 @ r0<- ssssBBBB (sign-extended)
mov r3, rINST, lsr #8 @ r3<- AA
mov r1, r0, asr #31 @ r1<- ssssssss (sign word)
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
CLEAR_SHADOW_PAIR r3, r2, lr @ Zero out the shadow regs
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[AA]
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r3, {r0-r1} @ vAA<- r0/r1
GOTO_OPCODE ip @ jump to next instruction

View file

@ -0,0 +1,12 @@
/* const-wide/32 vAA, #+BBBBbbbb */
/* Load a 32-bit literal sign-extended to 64 bits. */
FETCH r0, 1 @ r0<- 0000bbbb (low)
mov r3, rINST, lsr #8 @ r3<- AA
FETCH_S r2, 2 @ r2<- ssssBBBB (high)
FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
orr r0, r0, r2, lsl #16 @ r0<- BBBBbbbb
CLEAR_SHADOW_PAIR r3, r2, lr @ Zero out the shadow regs
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[AA]
mov r1, r0, asr #31 @ r1<- ssssssss (sign word)
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r3, {r0-r1} @ vAA<- r0/r1
GOTO_OPCODE ip @ jump to next instruction

View file

@ -0,0 +1,11 @@
/* const-wide/high16 vAA, #+BBBB000000000000 */
/* Load a 16-bit literal into bits 48..63; all lower bits are zero. */
FETCH r1, 1 @ r1<- 0000BBBB (zero-extended)
mov r3, rINST, lsr #8 @ r3<- AA
mov r0, #0 @ r0<- 00000000
mov r1, r1, lsl #16 @ r1<- BBBB0000
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
CLEAR_SHADOW_PAIR r3, r0, r2 @ Zero shadow regs
VREG_INDEX_TO_ADDR r3, r3 @ r3<- &fp[AA]
GET_INST_OPCODE ip @ extract opcode from rINST
stmia r3, {r0-r1} @ vAA<- r0/r1
GOTO_OPCODE ip @ jump to next instruction

View file

@ -0,0 +1 @@
/* div-double vAA, vBB, vCC: 64-bit FP divide via the wide binop template. */
%include "arm/fbinopWide.S" {"instr":"fdivd d2, d0, d1"}

View file

@ -0,0 +1 @@
/* div-double/2addr vA, vB: 64-bit FP divide, in-place form. */
%include "arm/fbinopWide2addr.S" {"instr":"fdivd d2, d0, d1"}

View file

@ -0,0 +1 @@
/* div-float vAA, vBB, vCC: 32-bit FP divide via the FP binop template. */
%include "arm/fbinop.S" {"instr":"fdivs s2, s0, s1"}

View file

@ -0,0 +1 @@
/* div-float/2addr vA, vB: 32-bit FP divide, in-place form. */
%include "arm/fbinop2addr.S" {"instr":"fdivs s2, s0, s1"}

View file

@ -0,0 +1,30 @@
%default {}
/*
 * Specialized 32-bit binary operation
 *
 * Performs "r0 = r0 div r1". The selection between sdiv or the gcc helper
 * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
 * ARMv7 CPUs that have hardware division support).
 *
 * Divide-by-zero is checked explicitly before the divide (neither sdiv nor
 * __aeabi_idiv throws).
 *
 * div-int
 *
 */
FETCH r0, 1 @ r0<- CCBB
mov r9, rINST, lsr #8 @ r9<- AA
mov r3, r0, lsr #8 @ r3<- CC
and r2, r0, #255 @ r2<- BB
GET_VREG r1, r3 @ r1<- vCC
GET_VREG r0, r2 @ r0<- vBB
cmp r1, #0 @ is second operand zero?
beq common_errDivideByZero
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
#ifdef __ARM_ARCH_EXT_IDIV__
sdiv r0, r0, r1 @ r0<- op
#else
bl __aeabi_idiv @ r0<- op, r0-r3 changed
#endif
GET_INST_OPCODE ip @ extract opcode from rINST
SET_VREG r0, r9 @ vAA<- r0
GOTO_OPCODE ip @ jump to next instruction
/* 11-14 instructions */

View file

@ -0,0 +1,29 @@
%default {}
/*
 * Specialized 32-bit binary operation
 *
 * Performs "r0 = r0 div r1". The selection between sdiv or the gcc helper
 * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
 * ARMv7 CPUs that have hardware division support).
 *
 * Divide-by-zero is checked explicitly before the divide.
 *
 * div-int/2addr
 *
 */
mov r3, rINST, lsr #12 @ r3<- B
ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r1, r3 @ r1<- vB
GET_VREG r0, r9 @ r0<- vA
cmp r1, #0 @ is second operand zero?
beq common_errDivideByZero
FETCH_ADVANCE_INST 1 @ advance rPC, load rINST
#ifdef __ARM_ARCH_EXT_IDIV__
sdiv r0, r0, r1 @ r0<- op
#else
bl __aeabi_idiv @ r0<- op, r0-r3 changed
#endif
GET_INST_OPCODE ip @ extract opcode from rINST
SET_VREG r0, r9 @ vA<- r0
GOTO_OPCODE ip @ jump to next instruction
/* 10-13 instructions */

View file

@ -0,0 +1,28 @@
%default {}
/*
 * Specialized 32-bit binary operation
 *
 * Performs "r0 = r0 div r1". The selection between sdiv or the gcc helper
 * depends on the compile time value of __ARM_ARCH_EXT_IDIV__ (defined for
 * ARMv7 CPUs that have hardware division support).
 *
 * Divide-by-zero is checked explicitly before the divide.
 *
 * div-int/lit16
 *
 */
FETCH_S r1, 1 @ r1<- ssssCCCC (sign-extended)
mov r2, rINST, lsr #12 @ r2<- B
ubfx r9, rINST, #8, #4 @ r9<- A
GET_VREG r0, r2 @ r0<- vB
cmp r1, #0 @ is second operand zero?
beq common_errDivideByZero
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
#ifdef __ARM_ARCH_EXT_IDIV__
sdiv r0, r0, r1 @ r0<- op
#else
bl __aeabi_idiv @ r0<- op, r0-r3 changed
#endif
GET_INST_OPCODE ip @ extract opcode from rINST
SET_VREG r0, r9 @ vA<- r0
GOTO_OPCODE ip @ jump to next instruction
/* 10-13 instructions */

View file

@ -0,0 +1,29 @@
%default {}
/*
 * Specialized 32-bit binary operation: div-int/lit8.
 *
 * Performs "r0 = r0 div r1" (vAA <- vBB / +CC, a sign-extended 8-bit
 * literal). The selection between sdiv or the gcc helper depends on the
 * compile time value of __ARM_ARCH_EXT_IDIV__ (defined for ARMv7 CPUs
 * that have hardware division support).
 */
FETCH_S r3, 1 @ r3<- ssssCCBB (sign-extended for CC
mov r9, rINST, lsr #8 @ r9<- AA (result register index)
and r2, r3, #255 @ r2<- BB (dividend register index)
GET_VREG r0, r2 @ r0<- vBB (dividend)
movs r1, r3, asr #8 @ r1<- ssssssCC (sign extended); sets Z for beq below
@cmp r1, #0 @ redundant: the movs above already set the flags; kept as documentation
beq common_errDivideByZero @ throw before advancing the PC
FETCH_ADVANCE_INST 2 @ advance rPC, load rINST
#ifdef __ARM_ARCH_EXT_IDIV__
sdiv r0, r0, r1 @ r0<- op (hardware divide)
#else
bl __aeabi_idiv @ r0<- op, r0-r3 changed
#endif
GET_INST_OPCODE ip @ extract opcode from rINST
SET_VREG r0, r9 @ vAA<- r0
GOTO_OPCODE ip @ jump to next instruction
/* 10-12 instructions */

View file

@ -0,0 +1 @@
%include "arm/binopWide.S" {"instr":"bl __aeabi_ldivmod", "chkzero":"1"}

View file

@ -0,0 +1 @@
%include "arm/binopWide2addr.S" {"instr":"bl __aeabi_ldivmod", "chkzero":"1"}

View file

@ -0,0 +1 @@
%include "arm/funopNarrower.S" {"instr":"vcvt.f32.f64 s0, d0"}

View file

@ -0,0 +1 @@
%include "arm/funopNarrower.S" {"instr":"ftosizd s0, d0"}

View file

@ -0,0 +1,33 @@
%include "arm/unopWide.S" {"instr":"bl d2l_doconv"}
%break
/*
 * Convert the double in r0/r1 to a long in r0/r1.
 *
 * We have to clip values to long min/max per the specification. The
 * expected common case is a "reasonable" value that converts directly
 * to modest integer. The EABI convert function isn't doing this for us.
 *
 * Out-of-range values saturate to MAXLONG/MINLONG by sign; NaN yields 0.
 */
d2l_doconv:
ubfx r2, r1, #20, #11 @ grab the exponent (11 bits at [30:20] of msw)
movw r3, #0x43e @ 0x43e = bias(1023) + 63, the exponent of 2^63
cmp r2, r3 @ exponent >= 0x43e means |x| >= 2^63, Inf, or NaN
bhs d2l_special_cases
b __aeabi_d2lz @ tail call to convert double to long
d2l_special_cases:
movw r3, #0x7ff
cmp r2, r3 @ all-ones exponent: NaN or infinity?
beq d2l_maybeNaN @ NaN?
d2l_notNaN:
adds r1, r1, r1 @ shift sign bit into carry (C=1 iff negative)
mov r0, #0xffffffff @ assume maxlong for lsw
mov r1, #0x7fffffff @ assume maxlong for msw
adc r0, r0, #0 @ +carry: lsw 0xffffffff -> 0 if negative
adc r1, r1, #0 @ +carry ripple: msw 0x7fffffff -> 0x80000000 (minlong)
bx lr @ return
d2l_maybeNaN:
orrs r3, r0, r1, lsl #12 @ OR together all 52 fraction bits; Z iff fraction == 0
beq d2l_notNaN @ zero fraction -> infinity: saturate like out-of-range
mov r0, #0 @ non-zero fraction -> NaN
mov r1, #0
bx lr @ return 0 for NaN

View file

@ -0,0 +1,14 @@
/* fill-array-data vAA, +BBBBBBBB */
/*
 * Fill the array in vAA from the data payload located BBBBBBBB code
 * units from this instruction. The work is done by the C helper
 * MterpFillArrayData(obj, payload); a zero return routes to
 * MterpPossibleException.
 */
EXPORT_PC @ NOTE(review): presumably publishes rPC for exception reporting -- confirm
FETCH r0, 1 @ r0<- bbbb (lo)
FETCH r1, 2 @ r1<- BBBB (hi)
mov r3, rINST, lsr #8 @ r3<- AA
orr r1, r0, r1, lsl #16 @ r1<- BBBBbbbb
GET_VREG r0, r3 @ r0<- vAA (array object)
add r1, rPC, r1, lsl #1 @ r1<- PC + BBBBbbbb*2 (array data off.)
bl MterpFillArrayData @ (obj, payload)
cmp r0, #0 @ 0 means an exception is thrown
beq MterpPossibleException @ exception?
FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction

View file

@ -0,0 +1,19 @@
%default { "helper":"MterpFilledNewArray" }
/*
 * Create a new array with elements filled from registers.
 *
 * The heavy lifting happens in C: $helper defaults to
 * MterpFilledNewArray, and the /range variant overrides it. The helper
 * is called as ($helper)(shadow_frame, pc, self); a zero return routes
 * to MterpPossibleException.
 *
 * for: filled-new-array, filled-new-array/range
 */
/* op vB, {vD, vE, vF, vG, vA}, class@CCCC */
/* op {vCCCC..v(CCCC+AA-1)}, type@BBBB */
.extern $helper
EXPORT_PC @ NOTE(review): presumably publishes rPC for exception reporting -- confirm
add r0, rFP, #OFF_FP_SHADOWFRAME @ r0<- shadow frame
mov r1, rPC @ r1<- current PC
mov r2, rSELF @ r2<- self (thread)
bl $helper @ ($helper)(shadow_frame, pc, self)
cmp r0, #0 @ zero return means failure
beq MterpPossibleException
FETCH_ADVANCE_INST 3 @ advance rPC, load rINST
GET_INST_OPCODE ip @ extract opcode from rINST
GOTO_OPCODE ip @ jump to next instruction

View file

@ -0,0 +1 @@
%include "arm/op_filled_new_array.S" { "helper":"MterpFilledNewArrayRange" }

View file

@ -0,0 +1 @@
%include "arm/funopWider.S" {"instr":"vcvt.f64.f32 d0, s0"}

View file

@ -0,0 +1 @@
%include "arm/funop.S" {"instr":"ftosizs s1, s0"}

View file

@ -0,0 +1,31 @@
%include "arm/unopWider.S" {"instr":"bl f2l_doconv"}
%break
/*
 * Convert the float in r0 to a long in r0/r1.
 *
 * We have to clip values to long min/max per the specification. The
 * expected common case is a "reasonable" value that converts directly
 * to modest integer. The EABI convert function isn't doing this for us.
 *
 * Out-of-range values saturate to MAXLONG/MINLONG by sign; NaN yields 0.
 */
f2l_doconv:
ubfx r2, r0, #23, #8 @ grab the exponent (8 bits at [30:23])
cmp r2, #0xbe @ 0xbe = bias(127) + 63: MINLONG <= x <= MAXLONG?
bhs f2l_special_cases @ |x| >= 2^63, Inf, or NaN
b __aeabi_f2lz @ tail call to convert float to long
f2l_special_cases:
cmp r2, #0xff @ all-ones exponent: NaN or infinity?
beq f2l_maybeNaN
f2l_notNaN:
adds r0, r0, r0 @ shift sign bit into carry (C=1 iff negative)
mov r0, #0xffffffff @ assume maxlong for lsw
mov r1, #0x7fffffff @ assume maxlong for msw
adc r0, r0, #0 @ +carry: lsw 0xffffffff -> 0 if negative
adc r1, r1, #0 @ +carry ripple: msw 0x7fffffff -> 0x80000000 (minlong)
bx lr @ return
f2l_maybeNaN:
lsls r3, r0, #9 @ drop sign + exponent; Z set iff fraction == 0
beq f2l_notNaN @ zero fraction -> infinity: saturate like out-of-range
mov r0, #0 @ non-zero fraction -> NaN
mov r1, #0
bx lr @ return 0 for NaN

View file

@ -0,0 +1,9 @@
/*
 * Unconditional branch, 8-bit offset.
 *
 * The branch distance is a signed code-unit offset, which we need to
 * double to get a byte offset; the common handler takes the code-unit
 * offset in rINST.
 */
/* goto +AA */
sbfx rINST, rINST, #8, #8 @ rINST<- ssssssAA (sign-extended branch offset)
b MterpCommonTakenBranchNoFlags

View file

@ -0,0 +1,9 @@
/*
 * Unconditional branch, 16-bit offset.
 *
 * The branch distance is a signed code-unit offset, which we need to
 * double to get a byte offset; the common handler takes the code-unit
 * offset in rINST.
 */
/* goto/16 +AAAA */
FETCH_S rINST, 1 @ rINST<- ssssAAAA (sign-extended branch offset)
b MterpCommonTakenBranchNoFlags

View file

@ -0,0 +1,16 @@
/*
 * Unconditional branch, 32-bit offset.
 *
 * The branch distance is a signed code-unit offset, which we need to
 * double to get a byte offset.
 *
 * Unlike most opcodes, this one is allowed to branch to itself, so
 * our "backward branch" test must be "<=0" instead of "<0". The
 * flag-setting orrs below leaves N/Z describing the assembled 32-bit
 * offset, which is why this branches to MterpCommonTakenBranch rather
 * than the NoFlags variant used by goto/goto16.
 *
 * NOTE(review): an earlier comment here described an adds/V-bit scheme;
 * the code as written uses orrs, which sets only N/Z (and C) -- confirm
 * that MterpCommonTakenBranch relies on N/Z alone.
 */
/* goto/32 +AAAAAAAA */
FETCH r0, 1 @ r0<- aaaa (lo)
FETCH r3, 2 @ r3<- AAAA (hi)
orrs rINST, r0, r3, lsl #16 @ rINST<- AAAAaaaa; sets N/Z for the <=0 test
b MterpCommonTakenBranch

View file

@ -0,0 +1 @@
%include "arm/bincmp.S" { "condition":"eq" }

View file

@ -0,0 +1 @@
%include "arm/zcmp.S" { "condition":"eq" }

View file

@ -0,0 +1 @@
%include "arm/bincmp.S" { "condition":"ge" }

View file

@ -0,0 +1 @@
%include "arm/zcmp.S" { "condition":"ge" }

View file

@ -0,0 +1 @@
%include "arm/bincmp.S" { "condition":"gt" }

View file

@ -0,0 +1 @@
%include "arm/zcmp.S" { "condition":"gt" }

View file

@ -0,0 +1 @@
%include "arm/bincmp.S" { "condition":"le" }

View file

@ -0,0 +1 @@
%include "arm/zcmp.S" { "condition":"le" }

Some files were not shown because too many files have changed in this diff Show more