Merge commit 'e64a00a7fcc1cfa7ac5f81626f85075997f9d8a3' as 'externals/vixl/vixl'
This commit is contained in:
parent 170ab30b8e · commit 1ca401619d
3140 changed files with 9339721 additions and 0 deletions
54 externals/vixl/vixl/.clang-format vendored Normal file

@@ -0,0 +1,54 @@
# Copyright 2016, VIXL authors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#   * Redistributions of source code must retain the above copyright notice,
#     this list of conditions and the following disclaimer.
#   * Redistributions in binary form must reproduce the above copyright notice,
#     this list of conditions and the following disclaimer in the documentation
#     and/or other materials provided with the distribution.
#   * Neither the name of ARM Limited nor the names of its contributors may be
#     used to endorse or promote products derived from this software without
#     specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

BasedOnStyle: Google

# We keep two empty lines between functions in `.cc` files.
MaxEmptyLinesToKeep: 2

# Either fit all arguments on the same line, or have one per line.
# Ideally we would like to allow grouping them when it makes sense. But
# `clang-format` cannot know what 'makes sense'.
BinPackArguments: false
BinPackParameters: false

PenaltyBreakBeforeFirstCallParameter: 500
PenaltyBreakString: 100

# Do not format comments that contain the `NOLINT` pragma for `cpplint.py`.
CommentPragmas: NOLINT

# Order of #include directives. clang-format will stop at the first rule that
# matches so the order in which they are declared is important.
IncludeCategories:
  - Regex: '".*aarch32.*"'
    Priority: 3
  - Regex: '".*aarch64.*"'
    Priority: 3
  - Regex: '<.*>'
    Priority: 1
  - Regex: '".*"'
    Priority: 2
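An editorial illustration (not part of the vendored file): under the categories above, a mixed set of includes would be ordered with system headers first, then generic project headers, then the architecture-specific ones. The header names below are just plausible examples from the VIXL tree.

```cpp
// Priority 1: system headers, matched by '<.*>'.
#include <iostream>

// Priority 2: generic project headers, matched by '".*"'.
#include "cpu-features.h"

// Priority 3: architecture-specific headers, matched by '".*aarch64.*"'.
#include "aarch64/macro-assembler-aarch64.h"
```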
33 externals/vixl/vixl/.clang-tidy vendored Normal file

@@ -0,0 +1,33 @@
---
# We use the clang-tidy defaults and the Google styles as a baseline, with a
# few changes specific to VIXL:
#   -clang-analyzer-security.insecureAPI.rand:
#     This warns against the use of mrand48 (etc) and suggests replacing them
#     with arc4random. However, we are using these to drive tests and debug
#     tools, and we need the ability to manually seed the generator. This is
#     not possible with arc4random, and we do not need particularly robust
#     random numbers, so we continue to use mrand48.
#   -google-readability-todo:
#     We don't put names on TODOs.
#   -google-readability-function-size:
#     There are cases where we need (or generate) very long functions,
#     particularly involving macro-generated encoding tables and so on.
#   -google-build-using-namespace:
#     We do this in internal contexts (typically in .cc files), but clang-tidy
#     cannot tell the difference.
#   -google-explicit-constructor:
#     We follow this rule, but have some exceptions that are annotated using
#     cpplint's NOLINT format.
#
# TODO: The following _should_ be enabled, but currently show several failures:
#   google-readability-braces-around-statements
#   google-readability-namespace-comments
#   google-readability-casting
#
# TODO: Also consider enabling other rules, such as bugprone-* and cert-*.
Checks: '-clang-analyzer-security.insecureAPI.rand,google-*,-google-readability-todo,-google-readability-function-size,-google-build-using-namespace,-google-explicit-constructor,-google-readability-braces-around-statements,-google-readability-namespace-comments,-google-readability-casting'
HeaderFilterRegex: '\.h$'
AnalyzeTemporaryDtors: false
CheckOptions:
...
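For illustration (editorial, not part of the vendored file), an exception annotated with cpplint's `NOLINT` format, as the `-google-explicit-constructor` note above describes, looks like this; the class is hypothetical:

```cpp
#include <stdint.h>

class Immediate {  // Hypothetical example class.
 public:
  // Implicit conversion from int64_t is intended here; the annotation
  // suppresses cpplint's runtime/explicit warning.
  Immediate(int64_t value) : value_(value) {}  // NOLINT(runtime/explicit)

 private:
  int64_t value_;
};
```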
7 externals/vixl/vixl/.gitignore vendored Normal file

@@ -0,0 +1,7 @@
# ignore python compiled object
*.pyc
*.html
.sconsign.dblite
log/
obj/
tools/.cached_lint_results.pkl
4 externals/vixl/vixl/.gitreview vendored Normal file

@@ -0,0 +1,4 @@
[gerrit]
host=review.linaro.org
port=29418
project=arm/vixl
38 externals/vixl/vixl/.ycm_extra_conf.py vendored Normal file

@@ -0,0 +1,38 @@
# Vim YouCompleteMe completion configuration.
#
# See doc/topics/ycm.md for details.

import os
import platform

repo_root = os.path.dirname(os.path.abspath(__file__))


# Paths in the compilation flags must be absolute to allow ycm to find them from
# any working directory.
def AbsolutePath(path):
  return os.path.join(repo_root, path)


flags = [
  '-I', AbsolutePath('src'),
  '-I', AbsolutePath('test'),
  '-DVIXL_DEBUG',  # Comma required: adjacent string literals would otherwise
                   # concatenate into '-DVIXL_DEBUG-Wall'.
  '-Wall',
  '-Werror',
  '-Wextra',
  '-pedantic',
  '-Wno-newline-eof',
  '-Wwrite-strings',
  '-std=c++11',
  '-x', 'c++'
]

if platform.machine() != 'aarch64':
  flags.append('-DVIXL_INCLUDE_SIMULATOR_AARCH64')


def FlagsForFile(filename, **kwargs):
  return {
    'flags': flags,
    'do_cache': True
  }
8 externals/vixl/vixl/AUTHORS vendored Normal file

@@ -0,0 +1,8 @@
# Below is a list of people and organisations that have contributed to the VIXL
# project. Entries should be added to the list as:
#
#   Name/Organization <email address>

ARM Ltd. <*@arm.com>
Google Inc. <*@google.com>
Linaro <*@linaro.org>
38 externals/vixl/vixl/CPPLINT.cfg vendored Normal file

@@ -0,0 +1,38 @@
# Stop cpplint from looking for CPPLINT.cfg outside of vixl.
set noparent
filter=+build/class
filter=+build/deprecated
filter=+build/forward_decl
filter=+build/include_order
filter=+build/printf_format
filter=+build/storage_class
filter=+legal/copyright
filter=+readability/boost
filter=+readability/braces
filter=+readability/casting
filter=+readability/constructors
filter=+readability/fn_size
filter=+readability/function
filter=+readability/multiline_comment
filter=+readability/multiline_string
filter=+readability/streams
filter=+readability/utf8
filter=+runtime/arrays
filter=+runtime/casting
filter=+runtime/deprecated_fn
filter=+runtime/explicit
filter=+runtime/int
filter=+runtime/memset
filter=+runtime/mutex
filter=+runtime/nonconf
filter=+runtime/printf
filter=+runtime/printf_format
filter=+runtime/references
filter=+runtime/rtti
filter=+runtime/sizeof
filter=+runtime/string
filter=+runtime/virtual
filter=+runtime/vlog
# cpplint.py enables these filters in reversed order.
filter=-
linelength=80
30 externals/vixl/vixl/LICENCE vendored Normal file

@@ -0,0 +1,30 @@
LICENCE
=======

The software in this repository is covered by the following licence.

// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
226 externals/vixl/vixl/README.md vendored Normal file

@@ -0,0 +1,226 @@
VIXL: Armv8 Runtime Code Generation Library, Version 5.1.0
==========================================================

Contents:

 * Overview
 * Licence
 * Requirements
 * Known limitations
 * Bug reports
 * Usage


Overview
========

VIXL contains three components.

 1. Programmatic **assemblers** to generate A64, A32 or T32 code at runtime. The
    assemblers abstract some of the constraints of each ISA; for example, most
    instructions support any immediate.
 2. **Disassemblers** that can print any instruction emitted by the assemblers.
 3. A **simulator** that can simulate any instruction emitted by the A64
    assembler. The simulator allows generated code to be run on another
    architecture without the need for a full ISA model.

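To make the three components concrete, here is a minimal sketch of generating and simulating A64 code. It loosely follows the style of the [examples](examples) directory; the exact setup (header paths, `Simulator` construction) should be checked there, and the simulator is only available in builds with `VIXL_INCLUDE_SIMULATOR_AARCH64`.

```cpp
#include <stdio.h>

#include "aarch64/macro-assembler-aarch64.h"
#include "aarch64/simulator-aarch64.h"

using namespace vixl::aarch64;

int main() {
  // Generate a tiny function: x0 = x0 + x1.
  MacroAssembler masm;
  masm.Add(x0, x0, x1);
  masm.Ret();
  masm.FinalizeCode();

  // Run the generated code on the host through the simulator.
  Decoder decoder;
  Simulator simulator(&decoder);
  simulator.WriteXRegister(0, 20);
  simulator.WriteXRegister(1, 22);
  simulator.RunFrom(masm.GetBuffer()->GetStartAddress<Instruction*>());
  printf("x0 = %d\n", static_cast<int>(simulator.ReadXRegister(0)));  // 42
  return 0;
}
```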
The VIXL git repository can be found [on 'https://git.linaro.org'][vixl].

Changes from previous versions of VIXL can be found in the
[Changelog](doc/changelog.md).


Licence
=======

This software is covered by the licence described in the [LICENCE](LICENCE)
file.


Requirements
============

To build VIXL the following software is required:

 1. Python 2.7
 2. SCons 2.0
 3. GCC 4.8+ or Clang 4.0+

A 64-bit host machine implementing an LP64 data model is required. VIXL has
been tested using GCC on AArch64 Debian, and GCC and Clang on amd64 Ubuntu
systems.

To run the linter and code formatting stages of the tests, the following
software is also required:

 1. Git
 2. [Google's `cpplint.py`][cpplint]
 3. clang-format-4.0
 4. clang-tidy-4.0

Refer to the 'Usage' section for details.

Note that on Ubuntu 18.04, clang-tidy-4.0 will only work if the clang-4.0
package is also installed.


Known Limitations
=================

VIXL was developed for JavaScript engines, so a number of features from A64
were deemed unnecessary:

 * Limited rounding mode support for floating point.
 * Limited support for synchronisation instructions.
 * Limited support for system instructions.
 * A few miscellaneous integer and floating point instructions are missing.

The VIXL simulator supports only those instructions that the VIXL assembler can
generate. The `doc` directory contains a
[list of supported A64 instructions](doc/aarch64/supported-instructions-aarch64.md).

The VIXL simulator was developed to run on 64-bit amd64 platforms. Whilst it
builds and mostly works for 32-bit x86 platforms, a number of floating-point
operations do not work correctly, and a number of tests fail as a result.

Debug Builds
------------

Your project's build system must define `VIXL_DEBUG` (e.g. `-DVIXL_DEBUG`)
when using a VIXL library that has been built with debug enabled.

Some classes defined in VIXL header files contain fields that are only present
in debug builds, so if `VIXL_DEBUG` is defined when the library is built, but
not defined for the header files included in your project, you will see runtime
failures.

Exclusive-Access Instructions
-----------------------------

All exclusive-access instructions are supported, but the simulator cannot
accurately simulate their behaviour as described in the ARMv8 Architecture
Reference Manual.

 * A local monitor is simulated, so simulated exclusive loads and stores execute
   as expected in a single-threaded environment.
 * The global monitor is simulated by occasionally causing exclusive-access
   instructions to fail regardless of the local monitor state.
 * Load-acquire/store-release semantics are approximated by issuing a host
   memory barrier after loads or before stores. The built-in
   `__sync_synchronize()` is used for this purpose; a sketch of this placement
   follows the list.

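As promised above, a small self-contained sketch of the documented barrier placement. This is an illustration of the approximation, not VIXL's actual simulator code, and the function names are hypothetical:

```cpp
#include <stdint.h>

// Acquire: nothing after the load may be reordered before it, so the
// barrier is issued after the load.
uint64_t SimulatedLoadAcquire(const uint64_t* addr) {
  uint64_t value = *addr;
  __sync_synchronize();
  return value;
}

// Release: nothing before the store may be reordered after it, so the
// barrier is issued before the store.
void SimulatedStoreRelease(uint64_t* addr, uint64_t value) {
  __sync_synchronize();
  *addr = value;
}
```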
The simulator tries to be strict, and implements the following restrictions
that the ARMv8 ARM allows:

 * A pair of load-/store-exclusive instructions will only succeed if they have
   the same address and access size.
 * Most of the time, cache-maintenance operations or explicit memory accesses
   will clear the exclusive monitor.
    * To ensure that simulated code does not depend on this behaviour, the
      exclusive monitor will sometimes be left intact after these instructions.

Instructions affected by these limitations:
`stxrb`, `stxrh`, `stxr`, `ldxrb`, `ldxrh`, `ldxr`, `stxp`, `ldxp`, `stlxrb`,
`stlxrh`, `stlxr`, `ldaxrb`, `ldaxrh`, `ldaxr`, `stlxp`, `ldaxp`, `stlrb`,
`stlrh`, `stlr`, `ldarb`, `ldarh`, `ldar`, `clrex`.


Security Considerations
-----------------------

VIXL allows callers to generate any code they want. The generated code is
arbitrary, and can therefore call back into any other component in the process.
As with any self-modifying code, vulnerabilities in the client or in VIXL itself
could lead to arbitrary code generation.

For performance reasons, VIXL's Assembler only performs debug-mode checking of
instruction operands (such as immediate field encodability). This can minimise
code-generation overheads for advanced compilers that already model instructions
accurately, and might consider the Assembler's checks to be redundant. The
Assembler should only be used directly where encodability is independently
checked, and where fine control over all generated code is required.

The MacroAssembler synthesises multiple-instruction sequences to support _some_
unencodable operand combinations. The MacroAssembler can provide a useful safety
check in cases where the Assembler's precision is not required; an unexpected
unencodable operand should result in a macro with the correct behaviour, rather
than an invalid instruction.

In general, the MacroAssembler handles operands which are likely to vary with
user-supplied data, but does not usually handle inputs which are likely to be
easily covered by tests. For example, move-immediate arguments are likely to be
data-dependent, but register types (e.g. `x` vs `w`) are not.

We recommend that _all_ users use the MacroAssembler, using `ExactAssemblyScope`
to invoke the Assembler when specific instruction sequences are required. This
approach is recommended even in cases where a compiler can model the
instructions precisely, because, subject to the limitations described above, it
offers an additional layer of protection against logic bugs in instruction
selection.

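For illustration, a hedged sketch of this pattern using the AArch64 API (assuming the `MacroAssembler`, `ExactAssemblyScope`, and `kInstructionSize` names used by VIXL's AArch64 code; see the getting-started guides for the authoritative form):

```cpp
#include "aarch64/macro-assembler-aarch64.h"

using namespace vixl::aarch64;

void Generate(MacroAssembler* masm) {
  // The MacroAssembler accepts any immediate; an unencodable value is
  // synthesised with a multi-instruction sequence (e.g. movz/movk).
  masm->Mov(x0, 0x1234567890abcdef);

  {
    // Inside an ExactAssemblyScope, exactly the requested instructions are
    // emitted, using the raw Assembler's lower-case mnemonics.
    ExactAssemblyScope scope(masm, kInstructionSize,
                             ExactAssemblyScope::kExactSize);
    masm->add(x0, x0, x1);  // Exactly one instruction.
  }
}
```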
Bug reports
===========

Bug reports may be sent to vixl@arm.com. Please provide any steps required to
recreate a bug, along with build environment and host system information.


Usage
=====

Running all Tests
-----------------

The helper script `tools/test.py` will build and run every test that is provided
with VIXL, in both release and debug mode. It is a useful script for verifying
that all of VIXL's dependencies are in place and that VIXL is working as it
should.

By default, the `tools/test.py` script runs a linter to check that the source
code conforms with the code style guide, and to detect several common errors
that the compiler may not warn about. This is most useful for VIXL developers.
The linter has the following dependencies:

 1. Git must be installed, and the VIXL project must be in a valid Git
    repository, such as one produced using `git clone`.
 2. `cpplint.py`, [as provided by Google][cpplint], must be available (and
    executable) on the `PATH`.

It is possible to tell `tools/test.py` to skip the linter stage by passing
`--nolint`. This removes the dependency on `cpplint.py` and Git. The `--nolint`
option is implied if the VIXL project is a snapshot (with no `.git` directory).

Additionally, `tools/test.py` tests code formatting using `clang-format-4.0`,
and performs static analysis using `clang-tidy-4.0`. If you don't have these
tools, disable the tests using `--noclang-format` or `--noclang-tidy`,
respectively.

Also note that the tests for the tracing features depend upon external `diff`
and `sed` tools. If these tools are not available in `PATH`, these tests will
fail.

Getting Started
---------------

We have separate guides for introducing VIXL, depending on what architecture you
are targeting. A guide for working with AArch32 can be found
[here][getting-started-aarch32], while the AArch64 guide is
[here][getting-started-aarch64]. Example source code is provided in the
[examples](examples) directory. You can build the examples with either `scons
aarch32_examples` or `scons aarch64_examples` from the root directory, or use
`scons --help` to get a detailed list of available build targets.




[cpplint]: http://google-styleguide.googlecode.com/svn/trunk/cpplint/cpplint.py
           "Google's cpplint.py script."

[vixl]: https://git.linaro.org/arm/vixl.git
        "The VIXL repository at 'https://git.linaro.org'."

[getting-started-aarch32]: doc/aarch32/getting-started-aarch32.md
                           "Introduction to VIXL for AArch32."

[getting-started-aarch64]: doc/aarch64/getting-started-aarch64.md
                           "Introduction to VIXL for AArch64."
597 externals/vixl/vixl/SConstruct vendored Normal file

@@ -0,0 +1,597 @@
# Copyright 2015, VIXL authors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#   * Redistributions of source code must retain the above copyright notice,
#     this list of conditions and the following disclaimer.
#   * Redistributions in binary form must reproduce the above copyright notice,
#     this list of conditions and the following disclaimer in the documentation
#     and/or other materials provided with the distribution.
#   * Neither the name of ARM Limited nor the names of its contributors may be
#     used to endorse or promote products derived from this software without
#     specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import glob
import itertools
import os
from os.path import join
import platform
import subprocess
import sys
from collections import OrderedDict

root_dir = os.path.dirname(File('SConstruct').rfile().abspath)
sys.path.insert(0, join(root_dir, 'tools'))
import config
import util

from SCons.Errors import UserError


Help('''
Build system for the VIXL project.
See README.md for documentation and details about the build system.
''')


# We track top-level targets to automatically generate help and alias them.
class VIXLTargets:
  def __init__(self):
    self.targets = []
    self.help_messages = []

  def Add(self, target, help_message):
    self.targets.append(target)
    self.help_messages.append(help_message)

  def Help(self):
    res = ""
    for i in range(len(self.targets)):
      res += '\t{0:<{1}}{2:<{3}}\n'.format(
          'scons ' + self.targets[i],
          len('scons ') + max(map(len, self.targets)),
          ' : ' + self.help_messages[i],
          len(' : ') + max(map(len, self.help_messages)))
    return res

top_level_targets = VIXLTargets()



# Build options ----------------------------------------------------------------

# Store all the options in a dictionary.
# The SConstruct will check the build variables and construct the build
# environment as appropriate.
options = {
    'all' : {  # Unconditionally processed.
      'CCFLAGS' : ['-Wall',
                   '-Werror',
                   '-fdiagnostics-show-option',
                   '-Wextra',
                   '-Wredundant-decls',
                   '-pedantic',
                   '-Wwrite-strings',
                   '-Wunused',
                   '-Wno-missing-noreturn'],
      'CPPPATH' : [config.dir_src_vixl]
      },
#   'build_option:value' : {
#     'environment_key' : 'values to append'
#     },
    'mode:debug' : {
      'CCFLAGS' : ['-DVIXL_DEBUG', '-O0']
      },
    'mode:release' : {
      'CCFLAGS' : ['-O3'],
      },
    'simulator:aarch64' : {
      'CCFLAGS' : ['-DVIXL_INCLUDE_SIMULATOR_AARCH64'],
      },
    'symbols:on' : {
      'CCFLAGS' : ['-g'],
      'LINKFLAGS' : ['-g']
      },
    'negative_testing:on' : {
      'CCFLAGS' : ['-DVIXL_NEGATIVE_TESTING']
      },
    'code_buffer_allocator:mmap' : {
      'CCFLAGS' : ['-DVIXL_CODE_BUFFER_MMAP']
      },
    'code_buffer_allocator:malloc' : {
      'CCFLAGS' : ['-DVIXL_CODE_BUFFER_MALLOC']
      },
    'ubsan:on' : {
      'CCFLAGS': ['-fsanitize=undefined'],
      'LINKFLAGS': ['-fsanitize=undefined']
      }
    }


# A `DefaultVariable` has a default value that depends on elements not known
# when variables are first evaluated.
# Each `DefaultVariable` has a handler that will compute the default value for
# the given environment.
def modifiable_flags_handler(env):
  env['modifiable_flags'] = \
      'on' if 'mode' in env and env['mode'] == 'debug' else 'off'


def symbols_handler(env):
  env['symbols'] = 'on' if 'mode' in env and env['mode'] == 'debug' else 'off'

def Is32BitHost(env):
  return env['host_arch'] in ['aarch32', 'i386']

def IsAArch64Host(env):
  return env['host_arch'] == 'aarch64'

def CanTargetA32(env):
  return 'a32' in env['target']

def CanTargetT32(env):
  return 't32' in env['target']

def CanTargetAArch32(env):
  return CanTargetA32(env) or CanTargetT32(env)

def CanTargetA64(env):
  return 'a64' in env['target']

def CanTargetAArch64(env):
  return CanTargetA64(env)


# By default, include the simulator only if AArch64 is targeted and we are not
# building VIXL natively for AArch64.
def simulator_handler(env):
  if not IsAArch64Host(env) and CanTargetAArch64(env):
    env['simulator'] = 'aarch64'
  else:
    env['simulator'] = 'none'


# 'mmap' is required for use with 'mprotect', which is needed for the tests
# (when running natively), so we use it by default where we can.
def code_buffer_allocator_handler(env):
  directives = util.GetCompilerDirectives(env)
  if '__linux__' in directives:
    env['code_buffer_allocator'] = 'mmap'
  else:
    env['code_buffer_allocator'] = 'malloc'

# A validator checks the consistency of provided options against the environment.
def default_validator(env):
  pass


def simulator_validator(env):
  if env['simulator'] == 'aarch64' and not CanTargetAArch64(env):
    raise UserError('Building an AArch64 simulator implies that VIXL targets '
                    'AArch64. Set `target` to include `aarch64` or `a64`.')


# Default variables may depend on each other, therefore we need this dictionary
# to be ordered.
vars_default_handlers = OrderedDict({
    # variable_name : [ 'default val', 'handler', 'validator']
    'symbols' : [ 'mode==debug', symbols_handler, default_validator ],
    'modifiable_flags' : [ 'mode==debug', modifiable_flags_handler, default_validator],
    'simulator' : [ 'on if the target architectures include AArch64 but '
                    'the host is not AArch64, else off',
                    simulator_handler, simulator_validator ],
    'code_buffer_allocator' : [ 'mmap with __linux__, malloc otherwise',
                                code_buffer_allocator_handler, default_validator ]
    })


def DefaultVariable(name, help, allowed_values):
  help = '%s (%s)' % (help, '|'.join(allowed_values))
  default_value = vars_default_handlers[name][0]
  def validator(name, value, env):
    if value != default_value and value not in allowed_values:
      # Keyword arguments are required here; positional arguments would not
      # bind to the named placeholders.
      raise UserError('Invalid value for option {name}: {value}. '
                      'Valid values are: {allowed_values}'.format(
                          name=name,
                          value=value,
                          allowed_values=allowed_values))
  return (name, help, default_value, validator)


def SortListVariable(iterator):
  # Previously this code relied on the order of items in a list
  # converted from a set. However in Python 3 the order changes each run.
  # Here we do a custom partial sort to ensure that the build directory
  # name is stable, the same across Python 2 and 3, and the same as the
  # old code.
  result = list(sorted(iterator))
  result = sorted(result, key=lambda x: x == 't32', reverse=True)
  result = sorted(result, key=lambda x: x == 'a32', reverse=True)
  result = sorted(result, key=lambda x: x == 'a64', reverse=True)
  return result


def AliasedListVariable(name, help, default_value, allowed_values, aliasing):
  help = '%s (all|auto|comma-separated list) (any combination from [%s])' % \
         (help, ', '.join(allowed_values))

  def validator(name, value, env):
    # Here the list has been converted to space-separated strings.
    if value == '': return  # auto
    for v in value.split():
      if v not in allowed_values:
        raise UserError('Invalid value for %s: %s' % (name, value))

  def converter(value):
    if value == 'auto': return []
    if value == 'all':
      translated = [aliasing[v] for v in allowed_values]
      return SortListVariable(itertools.chain.from_iterable(translated))
    # The validator is run later, hence the `get`.
    translated = [aliasing.get(v, v) for v in value.split(',')]
    return SortListVariable(itertools.chain.from_iterable(translated))

  return (name, help, default_value, validator, converter)


vars = Variables()
# Define command line build options.
vars.AddVariables(
    AliasedListVariable('target', 'Target ISA/Architecture', 'auto',
                        ['aarch32', 'a32', 't32', 'aarch64', 'a64'],
                        {'aarch32' : ['a32', 't32'],
                         'a32' : ['a32'], 't32' : ['t32'],
                         'aarch64' : ['a64'], 'a64' : ['a64']}),
    EnumVariable('mode', 'Build mode',
                 'release', allowed_values=config.build_options_modes),
    EnumVariable('ubsan', 'Enable undefined behavior checks',
                 'off', allowed_values=['on', 'off']),
    EnumVariable('negative_testing',
                 'Enable negative testing (needs exceptions)',
                 'off', allowed_values=['on', 'off']),
    DefaultVariable('symbols', 'Include debugging symbols in the binaries',
                    ['on', 'off']),
    DefaultVariable('simulator', 'Simulators to include', ['aarch64', 'none']),
    DefaultVariable('code_buffer_allocator',
                    'Configure the allocation mechanism in the CodeBuffer',
                    ['malloc', 'mmap']),
    ('std',
     'C++ standard. The standards tested are: %s.' % \
     ', '.join(config.tested_cpp_standards),
     config.tested_cpp_standards[0]),
    ('compiler_wrapper', 'Command to prefix to the C and C++ compiler (e.g. ccache)', '')
    )

# We use 'variant directories' to avoid recompiling multiple times when build
# options are changed; different build paths are used depending on the options
# set. These are the options that should be reflected in the build directory
# path.
options_influencing_build_path = [
  'target', 'mode', 'symbols', 'compiler', 'std', 'simulator', 'negative_testing',
  'code_buffer_allocator'
]



# Build helpers ----------------------------------------------------------------

def RetrieveEnvironmentVariables(env):
  for key in ['CC', 'CXX', 'AR', 'RANLIB', 'LD']:
    if os.getenv(key): env[key] = os.getenv(key)
  if os.getenv('LD_LIBRARY_PATH'): env['LIBPATH'] = os.getenv('LD_LIBRARY_PATH')
  if os.getenv('CCFLAGS'):
    env.Append(CCFLAGS = os.getenv('CCFLAGS').split())
  if os.getenv('CXXFLAGS'):
    env.Append(CXXFLAGS = os.getenv('CXXFLAGS').split())
  if os.getenv('LINKFLAGS'):
    env.Append(LINKFLAGS = os.getenv('LINKFLAGS').split())

# The architecture targeted by default will depend on the compiler being
# used. 'host_arch' is extracted from the compiler while 'target' can be
# set by the user.
# By default, we target both AArch32 and AArch64 unless the compiler targets a
# 32-bit architecture. At the moment, we cannot build VIXL's AArch64 support on
# a 32-bit platform.
# TODO: Port VIXL to build on a 32-bit platform.
def target_handler(env):
  # Auto detect.
  if Is32BitHost(env):
    # We sort the list to keep the same order as if it were specified as
    # an option.
    env['target'] = SortListVariable(['a32', 't32'])
  else:
    env['target'] = SortListVariable(['a64', 'a32', 't32'])


def target_validator(env):
  # TODO: Port VIXL64 to work on a 32-bit platform.
  if Is32BitHost(env) and CanTargetAArch64(env):
    raise UserError('Building VIXL for AArch64 in 32-bit is not supported. Set '
                    '`target` to `aarch32`')


# The target option is handled differently from the rest.
def ProcessTargetOption(env):
  if env['target'] == []: target_handler(env)

  if 'a32' in env['target']: env['CCFLAGS'] += ['-DVIXL_INCLUDE_TARGET_A32']
  if 't32' in env['target']: env['CCFLAGS'] += ['-DVIXL_INCLUDE_TARGET_T32']
  if 'a64' in env['target']: env['CCFLAGS'] += ['-DVIXL_INCLUDE_TARGET_A64']

  target_validator(env)


def ProcessBuildOptions(env):
  # 'all' is unconditionally processed.
  if 'all' in options:
    for var in options['all']:
      if var in env and env[var]:
        env[var] += options['all'][var]
      else:
        env[var] = options['all'][var]

  # The target option *must* be processed before the options defined in
  # vars_default_handlers.
  ProcessTargetOption(env)

  # Other build options must match 'option:value'.
  env_dict = env.Dictionary()

  # First apply the default variables handlers in order.
  for key, value in vars_default_handlers.items():
    default = value[0]
    handler = value[1]
    if env_dict.get(key) == default:
      handler(env_dict)

  # Second, run the series of validators, to check for errors.
  for _, value in vars_default_handlers.items():
    validator = value[2]
    validator(env)

  for key in env_dict.keys():
    # Then update the environment according to the value of the variable.
    key_val_couple = key + ':%s' % env_dict[key]
    if key_val_couple in options:
      for var in options[key_val_couple]:
        env[var] += options[key_val_couple][var]


def ConfigureEnvironmentForCompiler(env):
  compiler = util.CompilerInformation(env)
  if compiler == 'clang':
    # These warnings only work for Clang.
    # -Wimplicit-fallthrough only works when compiling the code base as C++11 or
    # newer. The compiler does not complain if the option is passed when
    # compiling earlier C++ standards.
    env.Append(CPPFLAGS = ['-Wimplicit-fallthrough', '-Wshorten-64-to-32'])

    # The '-Wunreachable-code' flag breaks builds for clang 3.4.
    if compiler != 'clang-3.4':
      env.Append(CPPFLAGS = ['-Wunreachable-code'])

    if env['ubsan'] == 'on':
      env.Append(LINKFLAGS = ['-fuse-ld=lld'])

  # GCC 4.8 has a bug which produces a warning saying that an anonymous Operand
  # object might be used uninitialized:
  #   http://gcc.gnu.org/bugzilla/show_bug.cgi?id=57045
  # The bug does not seem to appear in GCC 4.7, or in debug builds with GCC 4.8.
  if env['mode'] == 'release':
    if compiler == 'gcc-4.8':
      env.Append(CPPFLAGS = ['-Wno-maybe-uninitialized'])

  # GCC 6 and higher is able to detect throwing from inside a destructor and
  # reports a warning. However, if negative testing is enabled then assertions
  # will throw exceptions.
  if env['negative_testing'] == 'on' and env['mode'] == 'debug' \
     and compiler >= 'gcc-6':
    env.Append(CPPFLAGS = ['-Wno-terminate'])

  # Suggest missing override keywords on methods.
  if compiler >= 'gcc-5':
    env.Append(CPPFLAGS = ['-Wsuggest-override'])
  elif compiler >= 'clang-3.6':
    env.Append(CPPFLAGS = ['-Winconsistent-missing-override'])


def ConfigureEnvironment(env):
  RetrieveEnvironmentVariables(env)
  env['compiler'] = env['CXX']
  if env['compiler_wrapper'] != '':
    env['CXX'] = env['compiler_wrapper'] + ' ' + env['CXX']
    env['CC'] = env['compiler_wrapper'] + ' ' + env['CC']
  env['host_arch'] = util.GetHostArch(env)
  ProcessBuildOptions(env)
  if 'std' in env:
    env.Append(CPPFLAGS = ['-std=' + env['std']])
    std_path = env['std']
  ConfigureEnvironmentForCompiler(env)


def TargetBuildDir(env):
  # Build-time option values are embedded in the build path to avoid requiring a
  # full build when an option changes.
  build_dir = config.dir_build
  for option in options_influencing_build_path:
    option_value = ''.join(env[option]) if option in env else ''
    build_dir = join(build_dir, option + '_' + option_value)
  return build_dir


def PrepareVariantDir(location, build_dir):
  location_build_dir = join(build_dir, location)
  VariantDir(location_build_dir, location)
  return location_build_dir


def VIXLLibraryTarget(env):
  build_dir = TargetBuildDir(env)
  # Create a link to the latest build directory.
  # Use `-rf` to avoid failure when `latest` exists and is a directory.
  subprocess.check_call(["rm", "-rf", config.dir_build_latest])
  util.ensure_dir(build_dir)
  subprocess.check_call(["ln", "-s", build_dir, config.dir_build_latest])
  # Source files are in `src` and in `src/aarch64/`.
  variant_dir_vixl = PrepareVariantDir(join('src'), build_dir)
  sources = [Glob(join(variant_dir_vixl, '*.cc'))]
  if CanTargetAArch32(env):
    variant_dir_aarch32 = PrepareVariantDir(join('src', 'aarch32'), build_dir)
    sources.append(Glob(join(variant_dir_aarch32, '*.cc')))
  if CanTargetAArch64(env):
    variant_dir_aarch64 = PrepareVariantDir(join('src', 'aarch64'), build_dir)
    sources.append(Glob(join(variant_dir_aarch64, '*.cc')))
  return env.Library(join(build_dir, 'vixl'), sources)



# Build ------------------------------------------------------------------------

# The VIXL library, built by default.
env = Environment(variables = vars,
                  BUILDERS = {
                      'Markdown': Builder(action = 'markdown $SOURCE > $TARGET',
                                          suffix = '.html')
                  }, ENV = os.environ)
# Abort the build if any command line option is unknown or invalid.
unknown_build_options = vars.UnknownVariables()
if unknown_build_options:
  print('Unknown build options: ' + str(unknown_build_options.keys()))
  Exit(1)

if env['negative_testing'] == 'on' and env['mode'] != 'debug':
  print('negative_testing only works in debug mode')
  Exit(1)

ConfigureEnvironment(env)
Help(vars.GenerateHelpText(env))
libvixl = VIXLLibraryTarget(env)
Default(libvixl)
env.Alias('libvixl', libvixl)
top_level_targets.Add('', 'Build the VIXL library.')


# Common test code.
test_build_dir = PrepareVariantDir('test', TargetBuildDir(env))
test_objects = [env.Object(Glob(join(test_build_dir, '*.cc')))]

# AArch32 support
if CanTargetAArch32(env):
  # The examples.
  aarch32_example_names = util.ListCCFilesWithoutExt(config.dir_aarch32_examples)
  aarch32_examples_build_dir = PrepareVariantDir('examples/aarch32', TargetBuildDir(env))
  aarch32_example_targets = []
  for example in aarch32_example_names:
    prog = env.Program(join(aarch32_examples_build_dir, example),
                       join(aarch32_examples_build_dir, example + '.cc'),
                       LIBS=[libvixl])
    aarch32_example_targets.append(prog)
  env.Alias('aarch32_examples', aarch32_example_targets)
  top_level_targets.Add('aarch32_examples', 'Build the examples for AArch32.')

  # The benchmarks.
  aarch32_benchmark_names = util.ListCCFilesWithoutExt(config.dir_aarch32_benchmarks)
  aarch32_benchmarks_build_dir = PrepareVariantDir('benchmarks/aarch32', TargetBuildDir(env))
  aarch32_benchmark_targets = []
  for bench in aarch32_benchmark_names:
    prog = env.Program(join(aarch32_benchmarks_build_dir, bench),
                       join(aarch32_benchmarks_build_dir, bench + '.cc'),
                       LIBS=[libvixl])
    aarch32_benchmark_targets.append(prog)
  env.Alias('aarch32_benchmarks', aarch32_benchmark_targets)
  top_level_targets.Add('aarch32_benchmarks', 'Build the benchmarks for AArch32.')

  # The tests.
  test_aarch32_build_dir = PrepareVariantDir(join('test', 'aarch32'), TargetBuildDir(env))
  test_objects.append(env.Object(
      Glob(join(test_aarch32_build_dir, '*.cc')),
      CPPPATH = env['CPPPATH'] + [config.dir_tests],
      CCFLAGS = [flag for flag in env['CCFLAGS'] if flag != '-O3']))

# AArch64 support
if CanTargetAArch64(env):
  # The benchmarks.
  aarch64_benchmark_names = util.ListCCFilesWithoutExt(config.dir_aarch64_benchmarks)
  aarch64_benchmarks_build_dir = PrepareVariantDir('benchmarks/aarch64', TargetBuildDir(env))
  aarch64_benchmark_targets = []
  bench_utils = env.Object(join(aarch64_benchmarks_build_dir, 'bench-utils.o'),
                           join(aarch64_benchmarks_build_dir, 'bench-utils.cc'))
  for bench in aarch64_benchmark_names:
    if bench != 'bench-utils':
      prog = env.Program(join(aarch64_benchmarks_build_dir, bench),
                         [join(aarch64_benchmarks_build_dir, bench + '.cc'), bench_utils],
                         LIBS=[libvixl])
      aarch64_benchmark_targets.append(prog)
  env.Alias('aarch64_benchmarks', aarch64_benchmark_targets)
  top_level_targets.Add('aarch64_benchmarks', 'Build the benchmarks for AArch64.')

  # The examples.
  aarch64_example_names = util.ListCCFilesWithoutExt(config.dir_aarch64_examples)
  aarch64_examples_build_dir = PrepareVariantDir('examples/aarch64', TargetBuildDir(env))
  aarch64_example_targets = []
  for example in aarch64_example_names:
    prog = env.Program(join(aarch64_examples_build_dir, example),
                       join(aarch64_examples_build_dir, example + '.cc'),
                       LIBS=[libvixl])
    aarch64_example_targets.append(prog)
  env.Alias('aarch64_examples', aarch64_example_targets)
  top_level_targets.Add('aarch64_examples', 'Build the examples for AArch64.')

  # The tests.
  test_aarch64_build_dir = PrepareVariantDir(join('test', 'aarch64'), TargetBuildDir(env))
  test_objects.append(env.Object(
      Glob(join(test_aarch64_build_dir, '*.cc')),
      CPPPATH = env['CPPPATH'] + [config.dir_tests],
      CCFLAGS = [flag for flag in env['CCFLAGS'] if flag != '-O3']))

  # The test requires building the example files with specific options, so we
  # create a separate variant dir for the example objects built this way.
  test_aarch64_examples_vdir = join(TargetBuildDir(env), 'test', 'aarch64', 'test_examples')
  VariantDir(test_aarch64_examples_vdir, '.')
  test_aarch64_examples_obj = env.Object(
      [Glob(join(test_aarch64_examples_vdir, join('test', 'aarch64', 'examples', '*.cc'))),
       Glob(join(test_aarch64_examples_vdir, join('examples/aarch64', '*.cc')))],
      CCFLAGS = env['CCFLAGS'] + ['-DTEST_EXAMPLES'],
      CPPPATH = env['CPPPATH'] + [config.dir_aarch64_examples] + [config.dir_tests])
  test_objects.append(test_aarch64_examples_obj)

test = env.Program(join(test_build_dir, 'test-runner'), test_objects,
                   LIBS=[libvixl])
env.Alias('tests', test)
top_level_targets.Add('tests', 'Build the tests.')


env.Alias('all', top_level_targets.targets)
top_level_targets.Add('all', 'Build all the targets above.')

Help('\n\nAvailable top level targets:\n' + top_level_targets.Help())

extra_targets = VIXLTargets()

# Build documentation.
doc = [
  env.Markdown('README.md'),
  env.Markdown('doc/changelog.md'),
  env.Markdown('doc/aarch32/getting-started-aarch32.md'),
  env.Markdown('doc/aarch32/design/code-generation-aarch32.md'),
  env.Markdown('doc/aarch32/design/literal-pool-aarch32.md'),
  env.Markdown('doc/aarch64/supported-instructions-aarch64.md'),
  env.Markdown('doc/aarch64/getting-started-aarch64.md'),
  env.Markdown('doc/aarch64/topics/ycm.md'),
  env.Markdown('doc/aarch64/topics/extending-the-disassembler.md'),
  env.Markdown('doc/aarch64/topics/index.md'),
]
env.Alias('doc', doc)
extra_targets.Add('doc', 'Convert documentation to HTML (requires the '
                  '`markdown` program).')

Help('\nAvailable extra targets:\n' + extra_targets.Help())
30 externals/vixl/vixl/VERSIONS.md vendored Normal file

@@ -0,0 +1,30 @@
Versioning
==========

Since version 3.0.0, VIXL uses [Semantic Versioning 2.0.0][semver].

Briefly:

- Backwards-incompatible changes update the _major_ version.
- New features update the _minor_ version.
- Bug fixes update the _patch_ version.

Why 3.0.0?
----------

VIXL was originally released as 1.x using snapshot releases. When we moved VIXL
into Linaro, we started working directly on `master` and stopped tagging
named releases. However, we informally called this "VIXL 2", so we are skipping
2.0.0 to avoid potential confusion.

Using `master`
--------------

Users who want to take the latest development version of VIXL can still take
commits from `master`. Our day-to-day development process hasn't changed and
these commits should still pass their own tests. However, note that commits not
explicitly tagged with a given version should be considered to be unversioned,
with no backwards-compatibility guarantees.

[semver]: https://semver.org/spec/v2.0.0.html
          "Semantic Versioning 2.0.0 Specification"
9627 externals/vixl/vixl/benchmarks/aarch32/asm-disasm-speed-test.cc vendored Normal file
(File diff suppressed because it is too large.)
108 externals/vixl/vixl/benchmarks/aarch32/bench-branch-link-masm.cc vendored Normal file

@@ -0,0 +1,108 @@
// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>  // For atoi() and exit().
#include <sys/time.h>

#include "aarch32/constants-aarch32.h"
#include "aarch32/instructions-aarch32.h"
#include "aarch32/macro-assembler-aarch32.h"

using namespace vixl;
using namespace vixl::aarch32;

#ifdef VIXL_DEBUG
static const int kDefaultIterationsCount = 10000;
#else
static const int kDefaultIterationsCount = 100000;
#endif

// This program focuses on the emission of branches and veneers.
void benchmark(int iterations, InstructionSet isa) {
  const int buffer_size = 256 * KBytes;

  timeval start;
  gettimeofday(&start, NULL);
  MacroAssembler masm(buffer_size);
  masm.UseInstructionSet(isa);

#define __ masm.

  Label target_1, target_2, target_3, target_4;
  for (int i = 0; i < iterations; i++) {
    __ B(&target_1);
  }
  __ Bind(&target_1);
  for (int i = 0; i < iterations; i++) {
    __ B(eq, &target_2);
  }
  __ Bind(&target_2);
  for (int i = 0; i < iterations; i++) {
    __ Bl(&target_3);
  }
  __ Bind(&target_3);
  for (int i = 0; i < iterations; i++) {
    __ Blx(&target_4);
  }
  __ Bind(&target_4);

  masm.FinalizeCode();
  timeval end;
  gettimeofday(&end, NULL);
  double delta = (end.tv_sec - start.tv_sec) +
                 static_cast<double>(end.tv_usec - start.tv_usec) / 1000000;
  printf("%s: time for %d iterations: %gs\n",
         isa == T32 ? "T32" : "A32",
         iterations,
         delta);
}


int main(int argc, char* argv[]) {
  int iterations = 0;

  switch (argc) {
    case 1:
      iterations = kDefaultIterationsCount;
      break;
    case 2:
      iterations = atoi(argv[1]);
      break;
    default:
      printf("Usage: %s [#iterations]\n", argv[0]);
      exit(1);
  }

#ifdef VIXL_INCLUDE_TARGET_A32
  benchmark(iterations, A32);
#endif
#ifdef VIXL_INCLUDE_TARGET_T32
  benchmark(iterations, T32);
#endif

  return 0;
}
98 externals/vixl/vixl/benchmarks/aarch32/bench-branch-masm.cc vendored Normal file

@@ -0,0 +1,98 @@
// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>  // For atoi() and exit().
#include <sys/time.h>

#include "aarch32/constants-aarch32.h"
#include "aarch32/instructions-aarch32.h"
#include "aarch32/macro-assembler-aarch32.h"

using namespace vixl;
using namespace vixl::aarch32;

static const int kDefaultIterationCount = 100000;

// This program focuses on emitting branch instructions targeting a label bound
// very closely. Here the MacroAssembler is used. This exercises label binding
// and patching mechanisms, as well as the veneer resolving mechanisms for
// branches not requiring veneers.
void benchmark(int iterations, InstructionSet isa) {
  const int buffer_size = 256 * KBytes;

  timeval start;
  gettimeofday(&start, NULL);
  MacroAssembler masm(buffer_size);
  masm.UseInstructionSet(isa);

#define __ masm.

  for (int i = 0; i < iterations; i++) {
    Label target;
    __ B(&target);
    __ B(eq, &target);
    __ Bl(&target);
    __ Blx(&target);
    __ Bind(&target);
  }

  masm.FinalizeCode();
  timeval end;
  gettimeofday(&end, NULL);
  double delta = (end.tv_sec - start.tv_sec) +
                 static_cast<double>(end.tv_usec - start.tv_usec) / 1000000;
  printf("%s: time for %d iterations: %gs\n",
         isa == T32 ? "T32" : "A32",
         iterations,
         delta);
}


int main(int argc, char* argv[]) {
  int iterations = 0;

  switch (argc) {
    case 1:
      iterations = kDefaultIterationCount;
      break;
    case 2:
      iterations = atoi(argv[1]);
      break;
    default:
      printf("Usage: %s [#iterations]\n", argv[0]);
      exit(1);
  }

#ifdef VIXL_INCLUDE_TARGET_A32
  benchmark(iterations, A32);
#endif
#ifdef VIXL_INCLUDE_TARGET_T32
  benchmark(iterations, T32);
#endif

  return 0;
}
93 externals/vixl/vixl/benchmarks/aarch32/bench-dataop.cc vendored Normal file

@@ -0,0 +1,93 @@
// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>  // For atoi() and exit().
#include <sys/time.h>

#include "aarch32/constants-aarch32.h"
#include "aarch32/instructions-aarch32.h"
#include "aarch32/macro-assembler-aarch32.h"

using namespace vixl;
using namespace vixl::aarch32;

static const unsigned kDefaultInstructionCount = 100000;

// This program focuses on emitting simple instructions.
//
// This code will emit a given number of 'add r0, r1, r2' in a buffer.
// This code therefore focuses on Emit and Operand.
void benchmark(unsigned instructions, InstructionSet isa) {
  const unsigned buffer_size = 256 * KBytes;
  timeval start;
  gettimeofday(&start, NULL);

  MacroAssembler masm(buffer_size);
  masm.UseInstructionSet(isa);

#define __ masm.

  for (unsigned i = 0; i < instructions; ++i) {
    __ Add(r0, r1, Operand(r2));
  }

  masm.FinalizeCode();

  timeval end;
  gettimeofday(&end, NULL);
  double delta = (end.tv_sec - start.tv_sec) +
                 static_cast<double>(end.tv_usec - start.tv_usec) / 1000000;
  printf("%s: time for %u instructions: %gs\n",
         isa == T32 ? "T32" : "A32",
         instructions,
         delta);
}

int main(int argc, char* argv[]) {
  unsigned instructions = 0;

  switch (argc) {
    case 1:
      instructions = kDefaultInstructionCount;
      break;
    case 2:
      instructions = atoi(argv[1]);
      break;
    default:
      printf("Usage: %s [#instructions]\n", argv[0]);
      exit(1);
  }

#ifdef VIXL_INCLUDE_TARGET_A32
  benchmark(instructions, A32);
#endif
#ifdef VIXL_INCLUDE_TARGET_T32
  benchmark(instructions, T32);
#endif

  return 0;
}
105
externals/vixl/vixl/benchmarks/aarch32/bench-literal.cc
vendored
Normal file
@@ -0,0 +1,105 @@
// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>  // Declares atoi() and exit(), used in main() below.
#include <sys/time.h>

#include "aarch32/constants-aarch32.h"
#include "aarch32/instructions-aarch32.h"
#include "aarch32/macro-assembler-aarch32.h"

using namespace vixl;
using namespace vixl::aarch32;

static const int kDefaultIterationCount = 1000;
static const int kDefaultLiteralCount = 100;

// This program focuses on emitting loads of literals, forcing the emission of
// literal pools. Here the MacroAssembler is used. This exercises the literal
// pool management and emission mechanisms.
void benchmark(int iterations, int literals, InstructionSet isa) {
  const int buffer_size = 256 * KBytes;

  timeval start;
  gettimeofday(&start, NULL);
  MacroAssembler masm(buffer_size);
  masm.UseInstructionSet(isa);

#define __ masm.

  // Load a number of distinct literals, for a number of iterations, forcing
  // pool emission in between.
  for (int i = 0; i < iterations; i++) {
    for (int j = 0; j < literals; j++) {
      __ Ldr(r0, j);
      __ FinalizeCode();
    }
  }

  timeval end;
  gettimeofday(&end, NULL);
  double delta = (end.tv_sec - start.tv_sec) +
                 static_cast<double>(end.tv_usec - start.tv_usec) / 1000000;
  printf("%s: time for %d iterations: %gs\n",
         isa == T32 ? "T32" : "A32",
         iterations,
         delta);
}


int main(int argc, char* argv[]) {
  int iterations = 0;
  int literals = 0;

  switch (argc) {
    case 1:
      iterations = kDefaultIterationCount;
      literals = kDefaultLiteralCount;
      break;
    case 2:
      iterations = atoi(argv[1]);
      literals = kDefaultLiteralCount;
      break;
    case 3:
      iterations = atoi(argv[1]);
      literals = atoi(argv[2]);
      break;
    default:
      printf("Usage: %s [#iterations] [#literals]\n", argv[0]);
      exit(1);
  }

#ifdef VIXL_INCLUDE_TARGET_A32
  benchmark(iterations, literals, A32);
#endif
#ifdef VIXL_INCLUDE_TARGET_T32
  benchmark(iterations, literals, T32);
#endif

  return 0;
}
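For readers unfamiliar with literal pools: each __ Ldr(r0, j) above emits a
PC-relative load whose constant must be placed in a pool within the load's
addressing range. A minimal sketch of the mechanism being timed, reusing the
aarch32 API shown above (buffer size and constant are arbitrary):

// Sketch only: one literal load and the pool emission it forces.
MacroAssembler masm(4 * KBytes);
masm.UseInstructionSet(T32);
masm.Ldr(r0, 0x12345678);  // PC-relative load; the constant goes to a pool.
masm.FinalizeCode();       // Emits the pending literal pool after the code.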
78
externals/vixl/vixl/benchmarks/aarch64/bench-branch-link-masm.cc
vendored
Normal file
@@ -0,0 +1,78 @@
// Copyright 2019, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "globals-vixl.h"

#include "aarch64/instructions-aarch64.h"
#include "aarch64/macro-assembler-aarch64.h"

#include "bench-utils.h"

using namespace vixl;
using namespace vixl::aarch64;

// This program focuses on the emission of branches and veneers.
int main(int argc, char* argv[]) {
  BenchCLI cli(argc, argv);
  if (cli.ShouldExitEarly()) return cli.GetExitCode();

  const size_t buffer_size = 256 * KBytes;
  const size_t instructions_per_iteration = 4;
  const size_t max_buffer_iterations =
      buffer_size / (instructions_per_iteration * kInstructionSize);
  MacroAssembler masm(buffer_size);

  BenchTimer timer;

  size_t iterations = 0;
  do {
    masm.Reset();

    Label target_1, target_2, target_3, target_4;
    for (size_t i = 0; i < max_buffer_iterations; i++) {
      masm.B(&target_1);
    }
    masm.Bind(&target_1);
    for (size_t i = 0; i < max_buffer_iterations; i++) {
      masm.B(eq, &target_2);
    }
    masm.Bind(&target_2);
    for (size_t i = 0; i < max_buffer_iterations; i++) {
      masm.Cbz(x2, &target_3);
    }
    masm.Bind(&target_3);
    for (size_t i = 0; i < max_buffer_iterations; i++) {
      masm.Tbz(x3, 2, &target_4);
    }
    masm.Bind(&target_4);

    masm.FinalizeCode();
    iterations++;
  } while (!timer.HasRunFor(cli.GetRunTimeInSeconds()));

  cli.PrintResults(iterations, timer.GetElapsedSeconds());
  return cli.GetExitCode();
}
66
externals/vixl/vixl/benchmarks/aarch64/bench-branch-link.cc
vendored
Normal file
@@ -0,0 +1,66 @@
// Copyright 2019, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "globals-vixl.h"

#include "aarch64/instructions-aarch64.h"
#include "aarch64/macro-assembler-aarch64.h"

#include "bench-utils.h"

using namespace vixl;
using namespace vixl::aarch64;

// Bind many branches to the same label, like bench-branch.cc but with a single
// label. This stresses the label-linking mechanisms.
int main(int argc, char* argv[]) {
  BenchCLI cli(argc, argv);
  if (cli.ShouldExitEarly()) return cli.GetExitCode();

  const size_t buffer_size = 256 * KBytes;
  const size_t buffer_instruction_count = buffer_size / kInstructionSize;
  MacroAssembler masm(buffer_size);

  BenchTimer timer;

  size_t iterations = 0;
  do {
    masm.Reset();
    {
      ExactAssemblyScope scope(&masm, buffer_size);
      Label target;
      for (size_t i = 0; i < buffer_instruction_count; ++i) {
        masm.b(&target);
      }
      masm.bind(&target);
    }
    masm.FinalizeCode();
    iterations++;
  } while (!timer.HasRunFor(cli.GetRunTimeInSeconds()));

  cli.PrintResults(iterations, timer.GetElapsedSeconds());
  return cli.GetExitCode();
}
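The lowercase masm.b() and masm.bind() used inside the ExactAssemblyScope above
are the raw assembler entry points. A minimal sketch of the contrast with the
macro path, illustrative only and reusing the aarch64 API shown above:

// Sketch: macro vs. raw emission.
MacroAssembler masm(1 * KBytes);
Label l;
masm.B(&l);  // Macro path: the MacroAssembler may manage pools and veneers.
{
  // Raw path: the scope reserves exactly one instruction's worth of space,
  // and no pool or veneer emission may intervene.
  ExactAssemblyScope scope(&masm, kInstructionSize);
  masm.b(&l);
}
masm.Bind(&l);
masm.FinalizeCode();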
70
externals/vixl/vixl/benchmarks/aarch64/bench-branch-masm.cc
vendored
Normal file
@@ -0,0 +1,70 @@
// Copyright 2019, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "globals-vixl.h"

#include "aarch64/instructions-aarch64.h"
#include "aarch64/macro-assembler-aarch64.h"

#include "bench-utils.h"

using namespace vixl;
using namespace vixl::aarch64;

// This program focuses on emitting branch instructions targeting a label bound
// very closely. Here the MacroAssembler is used. This exercises label binding
// and patching mechanisms, as well as the veneer resolving mechanisms for
// branches not requiring veneers.
int main(int argc, char* argv[]) {
  BenchCLI cli(argc, argv);
  if (cli.ShouldExitEarly()) return cli.GetExitCode();

  const size_t buffer_size = 256 * KBytes;
  const size_t instructions_per_iteration = 4;
  const size_t max_buffer_iterations =
      buffer_size / (instructions_per_iteration * kInstructionSize);
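  // Added commentary (not original code): with a 256 KiB buffer and 4-byte
  // A64 instructions, max_buffer_iterations is 262144 / (4 * 4) = 16384
  // four-instruction iterations per buffer fill.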
  MacroAssembler masm(buffer_size);

  BenchTimer timer;

  size_t iterations = 0;
  do {
    masm.Reset();
    for (size_t i = 0; i < max_buffer_iterations; ++i) {
      Label target;
      masm.B(&target);
      masm.B(eq, &target);
      masm.Cbz(x2, &target);
      masm.Tbz(x3, 2, &target);
      masm.Bind(&target);
    }
    masm.FinalizeCode();
    iterations++;
  } while (!timer.HasRunFor(cli.GetRunTimeInSeconds()));

  cli.PrintResults(iterations, timer.GetElapsedSeconds());
  return cli.GetExitCode();
}
71
externals/vixl/vixl/benchmarks/aarch64/bench-branch.cc
vendored
Normal file
@@ -0,0 +1,71 @@
// Copyright 2019, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "globals-vixl.h"

#include "aarch64/instructions-aarch64.h"
#include "aarch64/macro-assembler-aarch64.h"

#include "bench-utils.h"

using namespace vixl;
using namespace vixl::aarch64;

// This program focuses on emitting branch instructions.
//
// This code will emit a given number of branch immediates to the next
// instruction in a fixed size buffer, looping over the buffer if necessary.
// This code therefore focuses on Emit and label binding/patching.
int main(int argc, char* argv[]) {
  BenchCLI cli(argc, argv);
  if (cli.ShouldExitEarly()) return cli.GetExitCode();

  const size_t buffer_size = 256 * KBytes;
  const size_t buffer_instruction_count = buffer_size / kInstructionSize;
  MacroAssembler masm(buffer_size);

  // We emit a branch to the next instruction.

  BenchTimer timer;

  size_t iterations = 0;
  do {
    masm.Reset();
    {
      ExactAssemblyScope scope(&masm, buffer_size);
      for (size_t i = 0; i < buffer_instruction_count; ++i) {
        Label target;
        masm.b(&target);
        masm.bind(&target);
      }
    }
    masm.FinalizeCode();
    iterations++;
  } while (!timer.HasRunFor(cli.GetRunTimeInSeconds()));

  cli.PrintResults(iterations, timer.GetElapsedSeconds());
  return cli.GetExitCode();
}
67
externals/vixl/vixl/benchmarks/aarch64/bench-dataop.cc
vendored
Normal file
@@ -0,0 +1,67 @@
// Copyright 2019, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "globals-vixl.h"

#include "aarch64/instructions-aarch64.h"
#include "aarch64/macro-assembler-aarch64.h"

#include "bench-utils.h"

using namespace vixl;
using namespace vixl::aarch64;

// This program focuses on emitting simple instructions.
//
// This code will emit a given number of 'add x0, x1, x2' in a fixed size
// buffer, looping over the buffer if necessary. This code therefore focuses
// on Emit and Operand.
int main(int argc, char* argv[]) {
  BenchCLI cli(argc, argv);
  if (cli.ShouldExitEarly()) return cli.GetExitCode();

  const size_t buffer_size = 256 * KBytes;
  const size_t buffer_instruction_count = buffer_size / kInstructionSize;
  MacroAssembler masm(buffer_size);

  BenchTimer timer;

  size_t iterations = 0;
  do {
    masm.Reset();
    {
      ExactAssemblyScope scope(&masm, buffer_size);
      for (size_t i = 0; i < buffer_instruction_count; ++i) {
        masm.add(x0, x1, Operand(x2));
      }
    }
    masm.FinalizeCode();
    iterations++;
  } while (!timer.HasRunFor(cli.GetRunTimeInSeconds()));

  cli.PrintResults(iterations, timer.GetElapsedSeconds());
  return cli.GetExitCode();
}
93
externals/vixl/vixl/benchmarks/aarch64/bench-mixed-disasm.cc
vendored
Normal file
@@ -0,0 +1,93 @@
// Copyright 2019, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "globals-vixl.h"

#include "aarch64/instructions-aarch64.h"
#include "aarch64/macro-assembler-aarch64.h"

#include "bench-utils.h"

using namespace vixl;
using namespace vixl::aarch64;

// Like PrintDisassembler, but to prevent the I/O overhead from dominating the
// benchmark, don't actually print anything.
class BenchDisassembler : public Disassembler {
 public:
  BenchDisassembler() : Disassembler(), generated_chars_(0) {}

  size_t GetGeneratedCharCount() const { return generated_chars_; }

 protected:
  virtual void ProcessOutput(const Instruction* instr) VIXL_OVERRIDE {
    USE(instr);
    generated_chars_ += strlen(GetOutput());
  }

  size_t generated_chars_;
};

// This program measures the performance of the disassembler, using the same
// code sequence used in bench-mixed-masm.cc.
int main(int argc, char* argv[]) {
  BenchCLI cli(argc, argv);
  if (cli.ShouldExitEarly()) return cli.GetExitCode();

  const size_t buffer_size = 256 * KBytes;
  MacroAssembler masm(buffer_size);
  masm.SetCPUFeatures(CPUFeatures::All());
  BenchCodeGenerator generator(&masm);

  masm.Reset();
  generator.Generate(buffer_size);
  masm.FinalizeCode();

  const Instruction* start =
      masm.GetBuffer()->GetStartAddress<const Instruction*>();
  const Instruction* end =
      masm.GetBuffer()->GetEndAddress<const Instruction*>();

  Decoder decoder;
  BenchDisassembler disasm;
  decoder.AppendVisitor(&disasm);

  BenchTimer timer;

  size_t iterations = 0;
  size_t generated_chars = 0;
  do {
    decoder.Decode(start, end);
    generated_chars += disasm.GetGeneratedCharCount();

    iterations++;
  } while (!timer.HasRunFor(cli.GetRunTimeInSeconds()));

  printf("Disassembled %" PRIu64 " characters.\n",
         static_cast<uint64_t>(generated_chars));
  cli.PrintResults(iterations, timer.GetElapsedSeconds());
  return cli.GetExitCode();
}
62
externals/vixl/vixl/benchmarks/aarch64/bench-mixed-masm.cc
vendored
Normal file
@@ -0,0 +1,62 @@
// Copyright 2019, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "globals-vixl.h"

#include "aarch64/instructions-aarch64.h"
#include "aarch64/macro-assembler-aarch64.h"

#include "bench-utils.h"

using namespace vixl;
using namespace vixl::aarch64;

// This program measures code generation time using the MacroAssembler. It aims
// to be representative of realistic usage, but is not based on real traces.
int main(int argc, char* argv[]) {
  BenchCLI cli(argc, argv);
  if (cli.ShouldExitEarly()) return cli.GetExitCode();

  const size_t buffer_size = 256 * KBytes;
  MacroAssembler masm(buffer_size);
  masm.SetCPUFeatures(CPUFeatures::All());
  BenchCodeGenerator generator(&masm);

  BenchTimer timer;

  size_t iterations = 0;
  do {
    masm.Reset();

    generator.Generate(buffer_size);

    masm.FinalizeCode();
    iterations++;
  } while (!timer.HasRunFor(cli.GetRunTimeInSeconds()));

  cli.PrintResults(iterations, timer.GetElapsedSeconds());
  return cli.GetExitCode();
}
96
externals/vixl/vixl/benchmarks/aarch64/bench-mixed-sim.cc
vendored
Normal file
@@ -0,0 +1,96 @@
// Copyright 2019, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "globals-vixl.h"

#include "aarch64/instructions-aarch64.h"
#include "aarch64/macro-assembler-aarch64.h"
#include "aarch64/simulator-aarch64.h"

#include "bench-utils.h"

#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64

using namespace vixl;
using namespace vixl::aarch64;

// Like PrintDisassembler, but to prevent the I/O overhead from dominating the
// benchmark, don't actually print anything.
class BenchDisassembler : public Disassembler {
 public:
  BenchDisassembler() : Disassembler(), generated_chars_(0) {}

  size_t GetGeneratedCharCount() const { return generated_chars_; }

 protected:
  virtual void ProcessOutput(const Instruction* instr) VIXL_OVERRIDE {
    USE(instr);
    generated_chars_ += strlen(GetOutput());
  }

  size_t generated_chars_;
};

// This program measures the performance of the simulator, running the same
// code sequence used in bench-mixed-masm.cc.
int main(int argc, char* argv[]) {
  BenchCLI cli(argc, argv);
  if (cli.ShouldExitEarly()) return cli.GetExitCode();

  const size_t buffer_size = 256 * KBytes;
  MacroAssembler masm(buffer_size);
  masm.SetCPUFeatures(CPUFeatures::All());
  BenchCodeGenerator generator(&masm);

  masm.Reset();
  generator.Generate(buffer_size);
  masm.FinalizeCode();

  const Instruction* start =
      masm.GetBuffer()->GetStartAddress<const Instruction*>();

  Decoder decoder;
  Simulator simulator(&decoder);
  simulator.SetCPUFeatures(CPUFeatures::All());

  BenchTimer timer;

  size_t iterations = 0;
  do {
    simulator.RunFrom(start);
    iterations++;
  } while (!timer.HasRunFor(cli.GetRunTimeInSeconds()));

  cli.PrintResults(iterations, timer.GetElapsedSeconds());
  return cli.GetExitCode();
}

#else  // VIXL_INCLUDE_SIMULATOR_AARCH64
int main(void) {
  printf("This benchmark requires AArch64 simulator support.\n");
  return EXIT_FAILURE;
}
#endif  // VIXL_INCLUDE_SIMULATOR_AARCH64
399
externals/vixl/vixl/benchmarks/aarch64/bench-utils.cc
vendored
Normal file
@@ -0,0 +1,399 @@
// Copyright 2019, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <vector>

#include "globals-vixl.h"
#include "aarch64/macro-assembler-aarch64.h"

#include "bench-utils.h"

using namespace vixl;
using namespace vixl::aarch64;

#define __ masm_->

const Register BenchCodeGenerator::scratch = x28;

Register BenchCodeGenerator::PickR(unsigned size_in_bits) {
  // Only select caller-saved registers [x0, x15].
  return Register(static_cast<unsigned>(GetRandomBits(4)), size_in_bits);
}

VRegister BenchCodeGenerator::PickV(unsigned size_in_bits) {
  // Only select caller-saved registers [v0, v7] or [v16, v31].
  // The resulting distribution is not uniform.
  unsigned code = static_cast<unsigned>(GetRandomBits(5));
  if (code < 16) code &= 0x7;  // [v8, v15] -> [v0, v7]
  return VRegister(code, size_in_bits);
}
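// Added commentary (not original code): GetRandomBits(5) is uniform over
// codes 0..31. The fold above maps two codes onto each of v0..v7, so each of
// v0..v7 is picked with probability 2/32, each of v16..v31 with 1/32, and
// the callee-saved v8..v15 never.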

uint64_t BenchCodeGenerator::GetRandomBits(int bits) {
  VIXL_ASSERT((bits >= 0) && (bits <= 64));
  uint64_t result = 0;

  while (bits >= 32) {
    // For big chunks, call jrand48 directly.
    result = (result << 32) | jrand48(rand_state_);  // [-2^31, 2^31)
    bits -= 32;
  }
  if (bits == 0) return result;

  // We often only want a few bits at a time, so use stored entropy to avoid
  // frequent calls to jrand48.

  if (bits > rnd_bits_) {
    // We want more bits than we have.
    result = (result << rnd_bits_) | rnd_;
    bits -= rnd_bits_;

    rnd_ = static_cast<uint32_t>(jrand48(rand_state_));  // [-2^31, 2^31)
    rnd_bits_ = 32;
  }

  VIXL_ASSERT(bits <= rnd_bits_);
  result = (result << bits) | (rnd_ % (UINT32_C(1) << bits));
  rnd_ >>= bits;
  rnd_bits_ -= bits;
  return result;
}

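// Added illustration (not part of this commit): a self-contained restatement
// of the entropy-caching idea used by GetRandomBits() above. Unlike the
// original it discards any stale cached bits on refill instead of
// concatenating them, and it supports at most 31 bits per call.
uint64_t TakeBitsIllustration(unsigned short state[3], int bits) {
  static uint32_t cache = 0;
  static int cache_bits = 0;
  VIXL_ASSERT((bits > 0) && (bits < 32));
  if (bits > cache_bits) {  // Refill when the cache runs dry.
    cache = static_cast<uint32_t>(jrand48(state));
    cache_bits = 32;
  }
  uint64_t result = cache % (UINT32_C(1) << bits);
  cache >>= bits;
  cache_bits -= bits;
  return result;
}
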
unsigned BenchCodeGenerator::PickRSize() {
  return PickBool() ? kWRegSize : kXRegSize;
}

unsigned BenchCodeGenerator::PickFPSize() {
  uint64_t entropy = GetRandomBits(4);
  // Doubles and floats are common in most languages, so use half-precision
  // types only rarely.
  if (entropy == 0) return kHRegSize;
  return ((entropy & 1) == 0) ? kSRegSize : kDRegSize;
}

void BenchCodeGenerator::Generate(size_t min_size_in_bytes) {
  Label start;
  __ Bind(&start);

  call_depth_++;
  GeneratePrologue();

  while (masm_->GetSizeOfCodeGeneratedSince(&start) < min_size_in_bytes) {
    GenerateArbitrarySequence();
  }

  GenerateEpilogue();
  call_depth_--;

  // Make sure that any labels (created by GenerateBranchSequence) are bound
  // before we exit.
  if (call_depth_ == 0) BindAllPendingLabels();
}

void BenchCodeGenerator::GeneratePrologue() {
  // Construct a normal frame.
  VIXL_ASSERT(masm_->StackPointer().Is(sp));
  __ Push(lr, x29);  // x29 is the frame pointer (fp).
  __ Mov(x29, sp);
  VIXL_ASSERT(call_depth_ > 0);
  if (call_depth_ == 1) {
    __ Push(scratch, xzr);
    // Claim space to use for loads and stores.
    // - We need at least 4 * kQRegSize bytes for Ld4/St4.
    // - The architecture requires that we allocate a multiple of 16 bytes.
    // - There is no hard upper limit, but the Simulator has a limited stack
    //   space.
    __ Claim((4 * kQRegSize) + (16 * GetRandomBits(3)));
    __ Mov(scratch, sp);
  }
}

void BenchCodeGenerator::GenerateEpilogue() {
  VIXL_ASSERT(call_depth_ > 0);
  if (call_depth_ == 1) {
    __ Sub(sp, x29, 2 * kXRegSizeInBytes);  // Drop the scratch space.
    __ Pop(xzr, scratch);
  }
  __ Pop(x29, lr);
  __ Ret();
}

void BenchCodeGenerator::GenerateArbitrarySequence() {
  // Bind pending labels, and remove them from the list.
  // Recently-linked labels are much more likely to be bound than old ones.
  // This should produce a mix of long- (veneered) and short-range branches.
  uint32_t bind_mask = static_cast<uint32_t>(
      GetRandomBits(8) | (GetRandomBits(7) << 1) | (GetRandomBits(6) << 2));
  BindPendingLabels(bind_mask);

  // If we are at the top call level (call_depth_ == 1), generate nested calls
  // 1/4 of the time, and halve the chance for each call level below that.
  VIXL_ASSERT(call_depth_ > 0);
  if (GetRandomBits(call_depth_ + 1) == 0) {
    GenerateCallReturnSequence();
    return;
  }

  // These weightings should be roughly representative of real functions.
  switch (GetRandomBits(4)) {
    case 0x0:
    case 0x1:
      GenerateTrivialSequence();
      return;
    case 0x2:
    case 0x3:
    case 0x4:
    case 0x5:
      GenerateOperandSequence();
      return;
    case 0x6:
    case 0x7:
    case 0x8:
      GenerateMemOperandSequence();
      return;
    case 0x9:
    case 0xa:
    case 0xb:
      GenerateImmediateSequence();
      return;
    case 0xc:
    case 0xd:
      GenerateBranchSequence();
      return;
    case 0xe:
      GenerateFPSequence();
      return;
    case 0xf:
      GenerateNEONSequence();
      return;
  }
}

void BenchCodeGenerator::GenerateTrivialSequence() {
  unsigned size = PickRSize();
  __ Asr(PickR(size), PickR(size), 4);
  __ Bfi(PickR(size), PickR(size), 5, 14);
  __ Bfc(PickR(size), 5, 14);
  __ Cinc(PickR(size), PickR(size), ge);
  __ Cinv(PickR(size), PickR(size), ne);
  __ Cls(PickR(size), PickR(size));
  __ Cneg(PickR(size), PickR(size), lt);
  __ Mrs(PickX(), NZCV);
  __ Nop();
  __ Mul(PickR(size), PickR(size), PickR(size));
  __ Rbit(PickR(size), PickR(size));
  __ Rev(PickR(size), PickR(size));
  __ Sdiv(PickR(size), PickR(size), PickR(size));
  if (!labels_.empty()) {
    __ Adr(PickX(), labels_.begin()->target);
  }
}

void BenchCodeGenerator::GenerateOperandSequence() {
  unsigned size = PickRSize();
  // The cast to Operand is normally implicit for simple registers, but we
  // explicitly specify it in every case here to ensure that the benchmark does
  // what we expect.
  __ And(PickR(size), PickR(size), Operand(PickR(size)));
  __ Bics(PickR(size), PickR(size), Operand(PickR(size)));
  __ Orr(PickR(size), PickR(size), Operand(PickR(size)));
  __ Eor(PickR(size), PickR(size), Operand(PickR(size)));
  __ Tst(PickR(size), Operand(PickR(size)));
  __ Eon(PickR(size), PickR(size), Operand(PickR(size)));
  __ Cmp(PickR(size), Operand(PickR(size)));
  __ Negs(PickR(size), Operand(PickR(size)));
  __ Mvn(PickR(size), Operand(PickR(size)));
  __ Ccmp(PickR(size), Operand(PickR(size)), NoFlag, eq);
  __ Ccmn(PickR(size), Operand(PickR(size)), NoFlag, eq);
  __ Csel(PickR(size), PickR(size), Operand(PickR(size)), lt);
  {
    // Ensure that `claim` doesn't alias any PickR().
    UseScratchRegisterScope temps(masm_);
    Register claim = temps.AcquireX();
    // We should only claim a 16-byte-aligned amount, since we're using the
    // system stack pointer.
    __ Mov(claim, GetRandomBits(4) * 16);
    __ Claim(Operand(claim));
    // Also claim a bit more, so we can store at sp+claim.
    __ Claim(Operand(32));
    __ Poke(PickR(size), Operand(claim));
    __ Peek(PickR(size), Operand(8));
    __ Poke(PickR(size), Operand(16));
    __ Peek(PickR(size), Operand(claim.W(), UXTW));
    __ Drop(Operand(32));
    __ Drop(Operand(claim));
  }
}

void BenchCodeGenerator::GenerateMemOperandSequence() {
  unsigned size = PickRSize();
  RegList store_list = GetRandomBits(16);  // Restrict to [x0, x15].
  __ StoreCPURegList(CPURegList(CPURegister::kRegister, size, store_list),
                     MemOperand(scratch));
  RegList load_list = GetRandomBits(16);  // Restrict to [x0, x15].
  __ LoadCPURegList(CPURegList(CPURegister::kRegister, size, load_list),
                    MemOperand(scratch));
  __ Str(PickX(), MemOperand(scratch));
  __ Strb(PickW(), MemOperand(scratch, 42));
  __ Strh(PickW(), MemOperand(scratch, 42, PostIndex));
  __ Ldrsw(PickX(), MemOperand(scratch, -42, PreIndex));
  __ Ldr(PickR(size), MemOperand(scratch, 19));  // Translated to ldur.
  __ Push(PickX(), PickX());
  // Ensure unique registers (in [x0, x15]) for Pop.
  __ Pop(Register(static_cast<int>(GetRandomBits(2)) + 0, kWRegSize),
         Register(static_cast<int>(GetRandomBits(2)) + 4, kWRegSize),
         Register(static_cast<int>(GetRandomBits(2)) + 8, kWRegSize),
         Register(static_cast<int>(GetRandomBits(2)) + 12, kWRegSize));
}

void BenchCodeGenerator::GenerateImmediateSequence() {
  unsigned size = PickRSize();
  __ And(PickR(size), PickR(size), GetRandomBits(size));
  __ Sub(PickR(size), PickR(size), GetRandomBits(size));
  __ Mov(PickR(size), GetRandomBits(size));
  __ Movk(PickX(), GetRandomBits(16), static_cast<int>(GetRandomBits(2)) * 16);
}

void BenchCodeGenerator::BindPendingLabels(uint64_t bind_mask) {
  if (bind_mask == 0) return;
  // The labels we bind here jump back to just after each branch that refers
  // to them. This allows a simple, linear execution path, whilst still
  // benchmarking long-range labels.
  //
  // Ensure that code falling through into this sequence does not jump
  // back to an earlier point in the execution path.
  Label done;
  __ B(&done);

  std::list<LabelPair>::iterator it = labels_.begin();
  while ((it != labels_.end()) && (bind_mask != 0)) {
    if ((bind_mask & 1) != 0) {
      // Bind the label and jump back to its source.
      __ Bind(it->target);
      __ B(it->cont);
      delete it->target;
      delete it->cont;
      it = labels_.erase(it);
    } else {
      ++it;  // Don't bind this one.
    }
    bind_mask >>= 1;
  }
  __ Bind(&done);
}

void BenchCodeGenerator::BindAllPendingLabels() {
  while (!labels_.empty()) {
    // BindPendingLabels generates a branch over each block of bound labels.
    // This will be repeated for each call here, but the effect is minimal and
    // (empirically) we rarely accumulate more than 64 pending labels anyway.
    BindPendingLabels(UINT64_MAX);
  }
}

void BenchCodeGenerator::GenerateBranchSequence() {
  {
    LabelPair pair = {new Label(), new Label()};
    __ B(lt, pair.target);
    __ Bind(pair.cont);
    labels_.push_front(pair);
  }

  {
    LabelPair pair = {new Label(), new Label()};
    __ Tbz(PickX(),
           static_cast<int>(GetRandomBits(kXRegSizeLog2)),
           pair.target);
    __ Bind(pair.cont);
    labels_.push_front(pair);
  }

  {
    LabelPair pair = {new Label(), new Label()};
    __ Cbz(PickX(), pair.target);
    __ Bind(pair.cont);
    labels_.push_front(pair);
  }
}

void BenchCodeGenerator::GenerateCallReturnSequence() {
  Label fn, done;

  if (PickBool()) {
    __ Bl(&fn);
  } else {
    Register reg = PickX();
    __ Adr(reg, &fn);
    __ Blr(reg);
  }
  __ B(&done);

  __ Bind(&fn);
  // Recurse with a randomised (but fairly small) minimum size.
  Generate(GetRandomBits(8));

  __ Bind(&done);
}

void BenchCodeGenerator::GenerateFPSequence() {
  unsigned size = PickFPSize();
  unsigned other_size = PickBool() ? size * 2 : size / 2;
  if (other_size < kHRegSize) other_size = kDRegSize;
  if (other_size > kDRegSize) other_size = kHRegSize;

  __ Fadd(PickV(size), PickV(size), PickV(size));
  __ Fmul(PickV(size), PickV(size), PickV(size));
  __ Fcvt(PickV(other_size), PickV(size));
  __ Fjcvtzs(PickW(), PickD());
  __ Fccmp(PickV(size), PickV(size), NCVFlag, pl);
  __ Fdiv(PickV(size), PickV(size), PickV(size));
  __ Fmov(PickV(size), 1.25 * GetRandomBits(2));
  __ Fmsub(PickV(size), PickV(size), PickV(size), PickV(size));
  __ Frintn(PickV(size), PickV(size));
}

void BenchCodeGenerator::GenerateNEONSequence() {
  __ And(PickV().V16B(), PickV().V16B(), PickV().V16B());
  __ Sqrshl(PickV().V8H(), PickV().V8H(), PickV().V8H());
  __ Umull(PickV().V2D(), PickV().V2S(), PickV().V2S());
  __ Sqdmlal2(PickV().V4S(), PickV().V8H(), PickV().V8H());

  // For structured loads and stores, we have to specify sequential (wrapped)
  // registers, so start with [v16, v31] and allow them to wrap into the
  // [v0, v7] range.
  VRegister vt(16 + static_cast<unsigned>(GetRandomBits(4)), kQRegSize);
  VRegister vt2((vt.GetCode() + 1) % kNumberOfVRegisters, kQRegSize);
  VRegister vt3((vt.GetCode() + 2) % kNumberOfVRegisters, kQRegSize);
  VRegister vt4((vt.GetCode() + 3) % kNumberOfVRegisters, kQRegSize);
  VIXL_ASSERT(!kCalleeSavedV.IncludesAliasOf(vt));
  VIXL_ASSERT(!kCalleeSavedV.IncludesAliasOf(vt2));
  VIXL_ASSERT(!kCalleeSavedV.IncludesAliasOf(vt3));
  VIXL_ASSERT(!kCalleeSavedV.IncludesAliasOf(vt4));
  __ Ld3(vt.V4S(), vt2.V4S(), vt3.V4S(), MemOperand(scratch));
  __ St4(vt.V16B(), vt2.V16B(), vt3.V16B(), vt4.V16B(), MemOperand(scratch));

  __ Fmaxv(PickV().H(), PickV().V8H());
  __ Fminp(PickV().V4S(), PickV().V4S(), PickV().V4S());
}
267
externals/vixl/vixl/benchmarks/aarch64/bench-utils.h
vendored
Normal file
@@ -0,0 +1,267 @@
// Copyright 2019, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_AARCH64_BENCH_UTILS_H_
#define VIXL_AARCH64_BENCH_UTILS_H_

#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>

#include <list>
#include <vector>

#include "globals-vixl.h"
#include "aarch64/macro-assembler-aarch64.h"

class BenchTimer {
 public:
  BenchTimer() { gettimeofday(&start_, NULL); }

  double GetElapsedSeconds() const {
    timeval elapsed = GetElapsed();
    double sec = elapsed.tv_sec;
    double usec = elapsed.tv_usec;
    return sec + (usec / 1000000.0);
  }

  bool HasRunFor(uint32_t seconds) {
    timeval elapsed = GetElapsed();
    VIXL_ASSERT(elapsed.tv_sec >= 0);
    return static_cast<uint64_t>(elapsed.tv_sec) >= seconds;
  }

 private:
  timeval GetElapsed() const {
    VIXL_ASSERT(timerisset(&start_));
    timeval now, elapsed;
    gettimeofday(&now, NULL);
    timersub(&now, &start_, &elapsed);
    return elapsed;
  }

  timeval start_;
};
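
// Usage sketch (added commentary, a condensed restatement of the loop the
// benchmarks in this directory run; DoOneUnitOfWork is a placeholder, not a
// real function):
//
//   BenchTimer timer;
//   size_t iterations = 0;
//   do {
//     DoOneUnitOfWork();
//     iterations++;
//   } while (!timer.HasRunFor(run_time_in_seconds));
//   double score = iterations / timer.GetElapsedSeconds();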
|
||||
// Provide a standard command-line interface for all benchmarks.
|
||||
class BenchCLI {
|
||||
public:
|
||||
// Set default values.
|
||||
BenchCLI(int argc, char* argv[])
|
||||
: run_time_(kDefaultRunTime), status_(kRunBenchmark) {
|
||||
for (int i = 1; i < argc; i++) {
|
||||
if ((strcmp(argv[i], "-h") == 0) || (strcmp(argv[i], "--help") == 0)) {
|
||||
PrintUsage(argv[0]);
|
||||
status_ = kExitSuccess;
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
// Use the default run time.
|
||||
if (argc == 1) return;
|
||||
|
||||
if (argc != 2) {
|
||||
if (argc > 0) PrintUsage(argv[0]);
|
||||
status_ = kExitFailure;
|
||||
return;
|
||||
}
|
||||
|
||||
char* end;
|
||||
unsigned long run_time = strtoul(argv[1], &end, 0); // NOLINT(runtime/int)
|
||||
if ((end == argv[1]) || (run_time > UINT32_MAX)) {
|
||||
PrintUsage(argv[0]);
|
||||
status_ = kExitFailure;
|
||||
return;
|
||||
}
|
||||
run_time_ = static_cast<uint32_t>(run_time);
|
||||
}
|
||||
|
||||
void PrintUsage(char* name) {
|
||||
printf("USAGE: %s [OPTIONS]... [RUN_TIME]\n", name);
|
||||
printf("\n");
|
||||
printf("Run a single VIXL benchmark for approximately RUN_TIME seconds,\n");
|
||||
printf("or %" PRIu32 " seconds if unspecified.\n", kDefaultRunTime);
|
||||
printf("\n");
|
||||
#ifdef VIXL_DEBUG
|
||||
printf("This is a DEBUG build. VIXL's assertions will be enabled, and\n");
|
||||
printf("extra debug information may be printed. The benchmark results\n");
|
||||
printf("are not representative of expected VIXL deployments.\n");
|
||||
printf("\n");
|
||||
#endif
|
||||
printf("OPTIONS:\n");
|
||||
printf("\n");
|
||||
printf(" -h, --help\n");
|
||||
printf(" Print this help message.\n");
|
||||
}
|
||||
|
||||
void PrintResults(uint64_t iterations, double elapsed_seconds) {
|
||||
double score = iterations / elapsed_seconds;
|
||||
printf("%g iteration%s per second (%" PRIu64 " / %g)",
|
||||
score,
|
||||
(score == 1.0) ? "" : "s",
|
||||
iterations,
|
||||
elapsed_seconds);
|
||||
#ifdef VIXL_DEBUG
|
||||
printf(" [Warning: DEBUG build]");
|
||||
#endif
|
||||
printf("\n");
|
||||
}
|
||||
|
||||
bool ShouldExitEarly() const {
|
||||
switch (status_) {
|
||||
case kRunBenchmark:
|
||||
return false;
|
||||
case kExitFailure:
|
||||
case kExitSuccess:
|
||||
return true;
|
||||
}
|
||||
VIXL_UNREACHABLE();
|
||||
return true;
|
||||
}
|
||||
|
||||
int GetExitCode() const {
|
||||
switch (status_) {
|
||||
case kExitFailure:
|
||||
return EXIT_FAILURE;
|
||||
case kExitSuccess:
|
||||
case kRunBenchmark:
|
||||
return EXIT_SUCCESS;
|
||||
}
|
||||
VIXL_UNREACHABLE();
|
||||
return EXIT_FAILURE;
|
||||
}
|
||||
|
||||
uint32_t GetRunTimeInSeconds() const { return run_time_; }
|
||||
|
||||
private:
|
||||
static const uint32_t kDefaultRunTime = 5;
|
||||
|
||||
uint32_t run_time_;
|
||||
|
||||
enum { kRunBenchmark, kExitSuccess, kExitFailure } status_;
|
||||
};
|
||||
|
||||
// Generate random, but valid (and simulatable) instruction sequences.
|
||||
//
|
||||
// The effect of the generated code is meaningless, but not harmful. That is,
|
||||
// it will not abort, callee-saved registers are properly preserved and so on.
|
||||
// It is possible to call it as a `void fn(void)` function.
|
||||
class BenchCodeGenerator {
|
||||
public:
|
||||
explicit BenchCodeGenerator(vixl::aarch64::MacroAssembler* masm)
|
||||
: masm_(masm), rnd_(0), rnd_bits_(0), call_depth_(0) {
|
||||
// Arbitrarily initialise rand_state_ using the behaviour of srand48(42).
|
||||
rand_state_[2] = 0;
|
||||
rand_state_[1] = 42;
|
||||
rand_state_[0] = 0x330e;
|
||||
}
|
||||
|
||||
void Generate(size_t min_size_in_bytes);
|
||||
|
||||
private:
|
||||
void GeneratePrologue();
|
||||
void GenerateEpilogue();
|
||||
|
||||
// Arbitrarily pick one of the other Generate*Sequence() functions.
|
||||
// TODO: Consider allowing this to be biased, so that a benchmark can focus on
|
||||
// a subset of sequences.
|
||||
void GenerateArbitrarySequence();
|
||||
|
||||
// Instructions with a trivial pass-through to Emit().
|
||||
void GenerateTrivialSequence();
|
||||
|
||||
// Instructions using the Operand and MemOperand abstractions. These have a
|
||||
// run-time cost, and many common VIXL APIs use them.
|
||||
void GenerateOperandSequence();
|
||||
void GenerateMemOperandSequence();
|
||||
|
||||
// Generate instructions taking immediates that require analysis (and may
|
||||
// result in multiple instructions per macro).
|
||||
void GenerateImmediateSequence();
|
||||
|
||||
// Immediate-offset and register branches. This also (necessarily) covers adr.
|
||||
void GenerateBranchSequence();
|
||||
|
||||
// Generate nested, conventional (blr+ret) calls.
|
||||
void GenerateCallReturnSequence();
|
||||
|
||||
void GenerateFPSequence();
|
||||
void GenerateNEONSequence();
|
||||
|
||||
// To exercise veneer pools, GenerateBranchSequence links labels that are
|
||||
// expected to be bound later. This helper binds them.
|
||||
// The Nth youngest label is bound if bit <N> is set in `bind_mask`. That
|
||||
// means that this helper can bind at most 64 pending labels.
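// For example, `BindPendingLabels(0x5)` binds the youngest (bit 0) and the
// third-youngest (bit 2) pending labels.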
|
||||
void BindPendingLabels(uint64_t bind_mask);
|
||||
|
||||
// As above, but unconditionally bind all pending labels (even if there are
|
||||
// more than 64 of them).
|
||||
void BindAllPendingLabels();
|
||||
|
||||
// Argument selection helpers. These only return caller-saved registers.
|
||||
|
||||
uint64_t GetRandomBits(int bits);
|
||||
bool PickBool() { return GetRandomBits(1) != 0; }
|
||||
|
||||
unsigned PickRSize();
|
||||
unsigned PickFPSize();
|
||||
|
||||
vixl::aarch64::Register PickR(unsigned size_in_bits);
|
||||
vixl::aarch64::VRegister PickV(
|
||||
unsigned size_in_bits = vixl::aarch64::kQRegSize);
|
||||
|
||||
vixl::aarch64::Register PickW() { return PickR(vixl::aarch64::kWRegSize); }
|
||||
vixl::aarch64::Register PickX() { return PickR(vixl::aarch64::kXRegSize); }
|
||||
vixl::aarch64::VRegister PickH() { return PickV(vixl::aarch64::kHRegSize); }
|
||||
vixl::aarch64::VRegister PickS() { return PickV(vixl::aarch64::kSRegSize); }
|
||||
vixl::aarch64::VRegister PickD() { return PickV(vixl::aarch64::kDRegSize); }
|
||||
|
||||
vixl::aarch64::MacroAssembler* masm_;
|
||||
|
||||
// State for *rand48(), used to randomise code generation.
|
||||
unsigned short rand_state_[3]; // NOLINT(runtime/int)
|
||||
|
||||
uint32_t rnd_;
|
||||
int rnd_bits_;
|
||||
|
||||
// The generator can produce nested calls. The probability of it doing this is
|
||||
// influenced by the current call depth, so we have to track it here.
|
||||
int call_depth_;
|
||||
|
||||
struct LabelPair {
|
||||
// We can't copy labels, so we have to allocate them dynamically to store
|
||||
// them in a std::list.
|
||||
vixl::aarch64::Label* target;
|
||||
vixl::aarch64::Label* cont;
|
||||
};
|
||||
std::list<LabelPair> labels_;
|
||||
|
||||
// Some sequences need a scratch area. Space for this is allocated on the
|
||||
// stack, and stored in this register.
|
||||
static const vixl::aarch64::Register scratch;
|
||||
};
|
||||
|
||||
#endif // VIXL_AARCH64_BENCH_UTILS_H_
|
239
externals/vixl/vixl/doc/aarch32/getting-started-aarch32.md
vendored
Normal file
|
@ -0,0 +1,239 @@
|
|||
Getting Started with VIXL for AArch32
|
||||
=====================================
|
||||
|
||||
|
||||
This guide will show you how to use the VIXL framework for AArch32. We will see
|
||||
how to set up the VIXL assembler and generate some code. We will also go into
|
||||
details on a few useful features provided by VIXL and see how to run the
|
||||
generated code.
|
||||
|
||||
The source code of the example developed in this guide can be found in the
|
||||
`examples/aarch32` directory (`examples/aarch32/getting-started.cc`).
|
||||
|
||||
|
||||
Creating the macro assembler.
|
||||
-----------------------------
|
||||
|
||||
First of all you need to make sure that the header files for the assembler are
|
||||
included. You should have the following lines at the beginning of your source
|
||||
file:
|
||||
|
||||
// You may use <cstdint> if using C++11 or later.
|
||||
extern "C" {
|
||||
#include <stdint.h>
|
||||
}
|
||||
|
||||
#include <cstdio>
|
||||
#include <string>
|
||||
#include "aarch32/constants-aarch32.h"
|
||||
#include "aarch32/instructions-aarch32.h"
|
||||
#include "aarch32/macro-assembler-aarch32.h"
|
||||
|
||||
In our case, those files are included by "examples.h".
|
||||
|
||||
All VIXL components are declared in the `vixl::aarch32` namespace, so let's add
|
||||
this to the beginning of the file for convenience (once again, done in
|
||||
"examples.h"):
|
||||
|
||||
using namespace vixl::aarch32;
|
||||
|
||||
Now we are ready to create and initialise the different components.
|
||||
|
||||
First of all we need to create a macro assembler object.
|
||||
|
||||
MacroAssembler masm;
|
||||
|
||||
|
||||
Generating some code.
|
||||
---------------------
|
||||
|
||||
We are now ready to generate some code. The macro assembler provides methods
|
||||
for all the instructions that you can use. As it's a macro assembler,
|
||||
the instructions that you tell it to generate may not directly map to a single
|
||||
hardware instruction. Instead, it can produce a short sequence of instructions
|
||||
that has the same effect.
|
||||
|
||||
Before looking at how to generate some code, let's introduce a simple but handy
|
||||
macro:
|
||||
|
||||
#define __ masm->
|
||||
|
||||
It allows us to write `__ Mov(r0, 42);` instead of `masm->Mov(r0, 42);` to
|
||||
generate code.
|
||||
|
||||
Now we are going to write a C++ function to generate our first assembly
|
||||
code fragment.
|
||||
|
||||
void GenerateDemo(MacroAssembler *masm) {
|
||||
__ Ldr(r1, 0x12345678);
|
||||
__ And(r0, r0, r1);
|
||||
__ Bx(lr);
|
||||
}
|
||||
|
||||
The generated code corresponds to a function with the following C prototype:
|
||||
|
||||
uint32_t demo(uint32_t x);
|
||||
|
||||
This function doesn't perform any useful operation. It loads the value
|
||||
0x12345678 into r1 and performs a bitwise `and` operation with
|
||||
the function's argument (stored in r0). The result of this `and` operation
|
||||
is returned by the function in r0.
|
||||
|
||||
Now in our program main function, we only need to create a label to represent
|
||||
the entry point of the assembly function and to call `GenerateDemo` to
|
||||
generate the code.
|
||||
|
||||
Label demo;
|
||||
masm.Bind(&demo);
|
||||
GenerateDemo(&masm);
|
||||
masm.Finalize();
|
||||
|
||||
Now we are going to learn a bit more on a couple of interesting VIXL features
|
||||
which are used in this example.
|
||||
|
||||
### Label
|
||||
|
||||
VIXL's assembler provides a mechanism to represent labels with `Label` objects.
|
||||
They are easy to use: simply create the C++ object and bind it to a location in
|
||||
the generated instruction stream.
|
||||
|
||||
Creating a label is easy, since you only need to define the variable and bind it
|
||||
to a location using the macro assembler.
|
||||
|
||||
Label my_label; // Create the label object.
|
||||
__ Bind(&my_label); // Bind it to the current location.
|
||||
|
||||
The target of a branch using a label will be the address to which it has been
|
||||
bound. For example, let's consider the following code fragment:
|
||||
|
||||
Label foo;
|
||||
|
||||
__ B(&foo); // Branch to foo.
|
||||
__ Mov(r0, 42);
|
||||
__ Bind(&foo); // Actual address of foo is here.
|
||||
__ Mov(r1, 0xc001);
|
||||
|
||||
If we run this code fragment the `Mov(r0, 42)` will never be executed since
|
||||
the first thing this code does is to jump to `foo`, which corresponds to the
|
||||
`Mov(r1, 0xc001)` instruction.
|
||||
|
||||
When working with labels you need to know that they are only to be used for
|
||||
local branches, and should be passed around with care. The major reason is
|
||||
that they cannot safely be passed or returned by value because this can trigger
|
||||
multiple constructor and destructor calls. The destructor has assertions
|
||||
to check that we don't try to branch to a label that hasn't been bound.
|
||||
|
||||
|
||||
### Literal Pool
|
||||
|
||||
On AArch32, instructions are 16 or 32 bits long, so immediate values encoded in
the instructions have a limited size. If you want to load a constant bigger than
this limit, you have two possibilities:
|
||||
|
||||
1. Use multiple instructions to load the constant in multiple steps. This
|
||||
solution is already handled in VIXL. For instance you can write:
|
||||
|
||||
`__ Mov(r0, 0x12345678);`
|
||||
|
||||
The previous instruction would not be legal since the immediate value is too
|
||||
big. However, VIXL's macro assembler will automatically rewrite this line into
|
||||
multiple instructions to efficiently generate the value, ultimately setting `r0`
to the correct value.
|
||||
|
||||
|
||||
2. Store the constant in memory and load it from there. The value
|
||||
needs to be written near the code that will load it since we use a PC-relative
|
||||
offset to indicate the address of this value. This solution has the advantage
|
||||
of making the value easily modifiable at run-time; since it does not reside
|
||||
in the instruction stream, it doesn't require cache maintenance when updated.
|
||||
|
||||
VIXL also provides a way to do this:
|
||||
|
||||
`__ Ldr(r0, 0x12345678);`
|
||||
|
||||
The assembler will store the immediate value in a "literal pool", a set of
|
||||
constants embedded in the code. VIXL will emit the literal pool when needed.
|
||||
|
||||
Literal pools are emitted regularly, such that they are within range of the
instructions that refer to them. However, you can force the literal pool to be
emitted using `masm.EmitLiteralPool()`. This generates a branch to skip the
pool.
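For example, a minimal sketch (using only APIs already shown in this guide):

    masm.Ldr(r0, 0x12345678);  // Adds 0x12345678 to the pending literal pool.
    masm.EmitLiteralPool();    // Force the pool out; a branch skips over it.

Emitting the pool early like this can be useful before a long straight-line
sequence, so that the pool cannot go out of range of the `ldr` that refers to
it.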
|
||||
|
||||
|
||||
Running the code.
|
||||
-----------------
|
||||
|
||||
We first need to run a few operations to get executable code. The
|
||||
`ExecutableMemory` helper takes care of it:
|
||||
|
||||
byte* code = masm.GetBuffer().GetBuffer();
|
||||
uint32_t code_size = masm.GetBuffer().GetSizeInBytes();
|
||||
ExecutableMemory memory(code, code_size);
|
||||
|
||||
Then we compute a pointer to the function we just generated:
|
||||
|
||||
uint32_t (*demo_function)(uint32_t) =
|
||||
memory.GetOffsetAddress<uint32_t (*)(uint32_t)>(0);
|
||||
|
||||
Now, we can call this function pointer exactly as if it were a pointer on a C
|
||||
function:
|
||||
|
||||
uint32_t input_value = 0x89abcdef;
|
||||
uint32_t output_value = (*demo_function)(input_value);
|
||||
|
||||
Finally, we can print a little trace of the call:
|
||||
|
||||
printf("native: abs(%08x) = %08x\n", input_value, output_value);
|
||||
|
||||
|
||||
The example shown in this tutorial is very simple, because the goal was to
|
||||
demonstrate the basics of the VIXL framework. There are more complex code
|
||||
examples in the VIXL `examples/aarch32` directory showing more features of both the
|
||||
macro assembler and the AArch32 architecture.
|
||||
|
||||
Disassembling the generated code.
|
||||
---------------------------------
|
||||
|
||||
Once you have generated something with the macro assembler, you may want to
|
||||
disassemble it.
|
||||
|
||||
First, you must include `<iostream>`:
|
||||
|
||||
#include <iostream>
|
||||
|
||||
And the disassembler header file:
|
||||
|
||||
#include "aarch32/disasm-aarch32.h"
|
||||
|
||||
Then you have to define the pc used for disassembly (the one used to display
the addresses, not the actual location of the instructions):
|
||||
|
||||
uint32_t display_pc = 0x1000;
|
||||
|
||||
Or, if you are running on a 32-bit host, you can use the real address:
|
||||
|
||||
uint32_t display_pc = reinterpret_cast<uintptr_t>(masm.GetBuffer().GetBuffer());
|
||||
|
||||
Then you can disassemble the macro assembler's buffer:
|
||||
|
||||
PrintDisassembler disasm(std::cout, display_pc);
|
||||
disasm.DisassembleA32Buffer(
|
||||
masm.GetBuffer().GetOffsetAddress<uint32_t*>(0), masm.GetCursorOffset());
|
||||
|
||||
If you generated T32 code instead of A32 code, you must use
|
||||
`DisassembleT32Buffer`. Warning: if your buffer contains data, or mixes T32
and A32 code, the result won't be accurate (everything will be disassembled
as either T32 or A32 code).
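For T32 code, a sketch of the equivalent call (assuming the whole buffer holds
T32 code; T32 buffers are addressed as 16-bit units):

    PrintDisassembler disasm(std::cout, display_pc);
    disasm.DisassembleT32Buffer(
        masm.GetBuffer().GetOffsetAddress<uint16_t*>(0), masm.GetCursorOffset());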
|
||||
|
||||
Example of A32 disassembly:
|
||||
|
||||
0x00001000 e30f0fff mov r0, #65535
|
||||
0x00001004 e34f0fff movt r0, #65535
|
||||
0x00001008 e3041567 mov r1, #17767
|
||||
0x0000100c e3401123 movt r1, #291
|
||||
0x00001010 e3a02000 mov r2, #0
|
||||
0x00001014 e7c2001f bfc r0, #0, #3
|
||||
0x00001018 e7d4081f bfc r0, #16, #5
|
||||
0x0000101c e7c72011 bfi r2, r1, #0, #8
|
||||
0x00001020 e7df2811 bfi r2, r1, #16, #16
|
||||
0x00001024 e1000070 hlt 0
|
207
externals/vixl/vixl/doc/aarch64/getting-started-aarch64.md
vendored
Normal file
|
@ -0,0 +1,207 @@
|
|||
Getting Started with VIXL AArch64
|
||||
=================================
|
||||
|
||||
|
||||
This guide will show you how to use the VIXL framework for AArch64. We will see
|
||||
how to set up the VIXL assembler and generate some code. We will also go into
|
||||
details on a few useful features provided by VIXL and see how to run the
|
||||
generated code in the VIXL simulator.
|
||||
|
||||
The source code of the example developed in this guide can be found in the
|
||||
`examples/aarch64` directory (`examples/aarch64/getting-started.cc`).
|
||||
|
||||
|
||||
Creating the macro assembler and the simulator.
|
||||
-----------------------------------------------
|
||||
|
||||
First of all you need to make sure that the header files for the assembler and
|
||||
the simulator are included. You should have the following lines at the beginning
|
||||
of your source file:
|
||||
|
||||
#include "aarch64/simulator-aarch64.h"
|
||||
#include "aarch64/macro-assembler-aarch64.h"
|
||||
|
||||
All VIXL components are declared in the `vixl::aarch64` namespace, so let's add
|
||||
this to the beginning of the file for convenience:
|
||||
|
||||
using namespace vixl::aarch64;
|
||||
|
||||
Creating a macro assembler is as simple as
|
||||
|
||||
MacroAssembler masm;
|
||||
|
||||
VIXL's assembler will generate some code at run-time, and this code needs to
|
||||
be stored in a buffer. By default, the assembler will automatically manage
the code buffer. However, constructors are available that allow manual
management of the code buffer.
|
||||
|
||||
We also need to set up the simulator. The simulator uses a Decoder object to
|
||||
read and decode the instructions from the code buffer. We need to create a
|
||||
decoder and bind our simulator to this decoder.
|
||||
|
||||
Decoder decoder;
|
||||
Simulator simulator(&decoder);
|
||||
|
||||
|
||||
Generating some code.
|
||||
---------------------
|
||||
|
||||
We are now ready to generate some code. The macro assembler provides methods
|
||||
for all the instructions that you can use. As it's a macro assembler,
|
||||
the instructions that you tell it to generate may not directly map to a single
|
||||
hardware instruction. Instead, it can produce a short sequence of instructions
|
||||
that has the same effect.
|
||||
|
||||
For instance, the hardware `add` instruction can only take a 12-bit immediate
|
||||
optionally shifted by 12, but the macro assembler can generate one or more
|
||||
instructions to handle any 64-bit immediate. For example, `Add(x0, x0, -1)`
|
||||
will be turned into `Sub(x0, x0, 1)`.
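Similarly, an arbitrary 64-bit immediate is typically synthesised with a
move-wide sequence. A sketch (the exact expansion is chosen by the macro
assembler, so the instructions below are only an illustration):

    masm.Mov(x0, 0x1122334455667788);
    // May be expanded to something like:
    //   movz x0, #0x7788
    //   movk x0, #0x5566, lsl #16
    //   movk x0, #0x3344, lsl #32
    //   movk x0, #0x1122, lsl #48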
|
||||
|
||||
Before looking at how to generate some code, let's introduce a simple but handy
|
||||
macro:
|
||||
|
||||
#define __ masm->
|
||||
|
||||
It allows us to write `__ Mov(x0, 42);` instead of `masm->Mov(x0, 42);` to
|
||||
generate code.
|
||||
|
||||
Now we are going to write a C++ function to generate our first assembly
|
||||
code fragment.
|
||||
|
||||
void GenerateDemoFunction(MacroAssembler *masm) {
|
||||
__ Ldr(x1, 0x1122334455667788);
|
||||
__ And(x0, x0, x1);
|
||||
__ Ret();
|
||||
}
|
||||
|
||||
The generated code corresponds to a function with the following C prototype:
|
||||
|
||||
uint64_t demo_function(uint64_t x);
|
||||
|
||||
This function doesn't perform any useful operation. It loads the value
|
||||
0x1122334455667788 into x1 and performs a bitwise `and` operation with
|
||||
the function's argument (stored in x0). The result of this `and` operation
|
||||
is returned by the function in x0.
|
||||
|
||||
Now in our program main function, we only need to create a label to represent
|
||||
the entry point of the assembly function and to call `GenerateDemoFunction` to
|
||||
generate the code.
|
||||
|
||||
Label demo_function;
|
||||
masm.Bind(&demo_function);
|
||||
GenerateDemoFunction(&masm);
|
||||
masm.Finalize();
|
||||
|
||||
Now we are going to learn a bit more on a couple of interesting VIXL features
|
||||
which are used in this example.
|
||||
|
||||
### Label
|
||||
|
||||
VIXL's assembler provides a mechanism to represent labels with `Label` objects.
|
||||
They are easy to use: simply create the C++ object and bind it to a location in
|
||||
the generated instruction stream.
|
||||
|
||||
Creating a label is easy, since you only need to define the variable and bind it
|
||||
to a location using the macro assembler.
|
||||
|
||||
Label my_label; // Create the label object.
|
||||
__ Bind(&my_label); // Bind it to the current location.
|
||||
|
||||
The target of a branch using a label will be the address to which it has been
|
||||
bound. For example, let's consider the following code fragment:
|
||||
|
||||
Label foo;
|
||||
|
||||
__ B(&foo); // Branch to foo.
|
||||
__ Mov(x0, 42);
|
||||
__ Bind(&foo); // Actual address of foo is here.
|
||||
__ Mov(x1, 0xc001);
|
||||
|
||||
If we run this code fragment the `Mov(x0, 42)` will never be executed since
|
||||
the first thing this code does is to jump to `foo`, which corresponds to the
|
||||
`Mov(x1, 0xc001)` instruction.
|
||||
|
||||
When working with labels you need to know that they are only to be used for
|
||||
local branches, and should be passed around with care. There are two reasons
|
||||
for this:
|
||||
|
||||
- They can't safely be passed or returned by value because this can trigger
|
||||
multiple constructor and destructor calls. The destructor has assertions
|
||||
to check that we don't try to branch to a label that hasn't been bound.
|
||||
|
||||
- The `B` instruction cannot branch to labels which are out of range of the
  branch. The `B` instruction has a range of 2^28 bytes (±128MB), but other
  variants (such as conditional or `CBZ`-like branches) have smaller ranges. Confining
|
||||
them to local ranges doesn't mean that we won't hit these limits, but it
|
||||
makes the lifetime of the labels much shorter and eases the debugging of
|
||||
these kinds of issues.
|
||||
|
||||
|
||||
### Literal Pool
|
||||
|
||||
On ARMv8, instructions are 32 bits long, so immediate values encoded in the
instructions have a limited size. If you want to load a constant bigger than this
limit, you have two possibilities:
|
||||
|
||||
1. Use multiple instructions to load the constant in multiple steps. This
|
||||
solution is already handled in VIXL. For instance you can write:
|
||||
|
||||
`__ Mov(x0, 0x1122334455667788);`
|
||||
|
||||
The previous instruction would not be legal since the immediate value is too
|
||||
big. However, VIXL's macro assembler will automatically rewrite this line into
|
||||
multiple instructions to efficiently generate the value.
|
||||
|
||||
|
||||
2. Store the constant in memory and load it from there. The value
|
||||
needs to be written near the code that will load it since we use a PC-relative
|
||||
offset to indicate the address of this value. This solution has the advantage
|
||||
of making the value easily modifiable at run-time; since it does not reside
|
||||
in the instruction stream, it doesn't require cache maintenance when updated.
|
||||
|
||||
VIXL also provides a way to do this:
|
||||
|
||||
`__ Ldr(x0, 0x1122334455667788);`
|
||||
|
||||
The assembler will store the immediate value in a "literal pool", a set of
|
||||
constants embedded in the code. VIXL will emit literal pools after natural
|
||||
breaks in the control flow, such as unconditional branches or return
|
||||
instructions.
|
||||
|
||||
Literal pools are emitted regularly, such that they are within range of the
|
||||
instructions that refer to them. However, you can force a literal pool to be
|
||||
emitted using `masm.EmitLiteralPool()`.
|
||||
|
||||
|
||||
Running the code in the simulator.
|
||||
----------------------------------
|
||||
|
||||
Now we are going to see how to use the simulator to run the code that we
|
||||
generated previously.
|
||||
|
||||
Use the simulator to assign a value to the registers. Our previous code example
|
||||
uses the register x0 as an input, so let's set the value of this register.
|
||||
|
||||
simulator.WriteXRegister(0, 0x8899aabbccddeeff);
|
||||
|
||||
Now we can jump to the `demo_function` label to execute the code:

    simulator.RunFrom(demo_function.target());
|
||||
|
||||
When the execution has finished and the simulator has returned, you can inspect
the values of the registers. For instance:
|
||||
|
||||
printf("x0 = %" PRIx64 "\n", simulator.ReadXRegister(0));
|
||||
|
||||
The example shown in this tutorial is very simple, because the goal was to
|
||||
demonstrate the basics of the VIXL framework. There are more complex code
|
||||
examples in the VIXL `examples/aarch64` directory showing more features of both the
|
||||
macro assembler and the ARMv8 architecture.
|
||||
|
||||
|
||||
Extras
|
||||
------
|
||||
|
||||
In addition to this document and the [examples](/examples/aarch64), you can find
|
||||
documentation and guides on various topics that may be helpful
|
||||
[here](/doc/aarch64/topics/index.md).
|
6242
externals/vixl/vixl/doc/aarch64/supported-instructions-aarch64.md
vendored
Normal file
File diff suppressed because it is too large
60
externals/vixl/vixl/doc/aarch64/topics/extending-the-disassembler.md
vendored
Normal file
|
@ -0,0 +1,60 @@
|
|||
Extending the disassembler
|
||||
==========================
|
||||
|
||||
The output of the disassembler can be extended and customized. This may be
|
||||
useful, for example, to add comments and annotations to the disassembly, print
|
||||
aliases for register names, or add an offset to disassembled addresses.
|
||||
|
||||
The general procedure to achieve this is to create a sub-class of
|
||||
`Disassembler` and override the appropriate virtual functions.
|
||||
|
||||
The `Disassembler` class provides virtual methods that implement how specific
|
||||
disassembly elements are printed. See
|
||||
[src/aarch64/disasm-aarch64.h](/src/aarch64/disasm-aarch64.h) for details.
|
||||
These include functions like:
|
||||
|
||||
virtual void AppendRegisterNameToOutput(const Instruction* instr,
|
||||
const CPURegister& reg);
|
||||
virtual void AppendPCRelativeOffsetToOutput(const Instruction* instr,
|
||||
int64_t offset);
|
||||
virtual void AppendCodeRelativeAddressToOutput(const Instruction* instr,
|
||||
const void* addr);
|
||||
virtual void AppendCodeRelativeCodeAddressToOutput(const Instruction* instr,
|
||||
const void* addr);
|
||||
|
||||
They can be overridden, for example, to use different register names and
annotate code addresses.
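As a minimal sketch (assuming `using namespace vixl::aarch64;`), a sub-class
could print an alias for `x16` (`ip0`, matching the example output below) and
defer to the default implementation otherwise:

    class CustomDisassembler : public Disassembler {
     public:
      virtual void AppendRegisterNameToOutput(const Instruction* instr,
                                              const CPURegister& reg)
          VIXL_OVERRIDE {
        if (reg.IsRegister() && (reg.GetCode() == 16)) {
          AppendToOutput("ip0");  // Print the alias instead of "x16".
        } else {
          Disassembler::AppendRegisterNameToOutput(instr, reg);
        }
      }
    };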
|
||||
|
||||
More complex modifications can be performed by overriding the visitor functions
|
||||
of the disassembler. The VIXL `Decoder` uses a visitor pattern implementation,
|
||||
so the `Disassembler` (as a sub-class of `DecoderVisitor`) must provide a
|
||||
visitor function for each sub-type of instructions. The complete list of
|
||||
visitors is defined by the macro `VISITOR_LIST` in
|
||||
[src/aarch64/decoder-aarch64.h](/src/aarch64/decoder-aarch64.h).
|
||||
|
||||
The [/examples/custom-disassembler.h](/examples/custom-disassembler.h) and
|
||||
[/examples/custom-disassembler.cc](/examples/custom-disassembler.cc) example
|
||||
files show how the methods can be overridden to use different register names,
|
||||
map code addresses, annotate code addresses, and add comments:
|
||||
|
||||
VIXL disasm 0x7fff04cb05e0: add x10, x16, x17
|
||||
custom disasm -0x8: add x10, ip0, ip1 // add/sub to x10
|
||||
|
||||
VIXL disasm 0x7fff04cb05e4: cbz x10, #+0x28 (addr 0x7fff04cb060c)
|
||||
custom disasm -0x4: cbz x10, #+0x28 (addr 0x24 ; label: somewhere)
|
||||
|
||||
VIXL disasm 0x7fff04cb05e8: add x11, x16, x17
|
||||
custom disasm 0x0: add x11, ip0, ip1
|
||||
|
||||
VIXL disasm 0x7fff04cb05ec: add w5, w6, w30
|
||||
custom disasm 0x4: add w5, w6, w30
|
||||
|
||||
VIXL disasm 0x7fff04cb05f0: tbz w10, #2, #-0x10 (addr 0x7fff04cb05e0)
|
||||
custom disasm 0x8: tbz w10, #2, #-0x10 (addr -0x8)
|
||||
|
||||
|
||||
One can refer to the implementation of visitor functions for the `Disassembler`
|
||||
(in [src/aarch64/disasm-aarch64.cc](/src/aarch64/disasm-aarch64.cc)) or even
|
||||
for the `Simulator`
|
||||
(in [src/aarch64/simulator-aarch64.cc](/src/aarch64/simulator-aarch64.cc))
|
||||
to see how to extract information from instructions.
|
8
externals/vixl/vixl/doc/aarch64/topics/index.md
vendored
Normal file
|
@ -0,0 +1,8 @@
|
|||
We will try to add documentation for topics that may be useful to VIXL users. If
|
||||
you think of any topic that may be useful and is not listed here, please contact
|
||||
us at <vixl@arm.com>.
|
||||
|
||||
You can also have a look at the ['getting started' page](../getting-started-aarch64.md).
|
||||
|
||||
* [Extending and customizing the disassembler](extending-the-disassembler.md)
|
||||
* [Using VIM YouCompleteMe with VIXL](ycm.md)
|
231
externals/vixl/vixl/doc/aarch64/topics/state-trace.md
vendored
Normal file
|
@ -0,0 +1,231 @@
|
|||
AArch64 Simulator state trace
|
||||
=============================
|
||||
|
||||
The AArch64 Simulator can be configured to produce traces of instruction
|
||||
execution, register contents, and memory accesses. The trace is designed to be
|
||||
intuitive for human readers, but this document describes the format of the
|
||||
trace, so that post-processing tools can confidently parse the output.
|
||||
|
||||
In VIXL's own test runner, the trace is controlled by the `--trace*` options.
|
||||
Run `test-runner --help` for details.
|
||||
|
||||
Basic structure
|
||||
---------------
|
||||
|
||||
Executed instructions show the address, the encoding of the instruction and the
|
||||
disassembly (as produced by VIXL's Disassembler). For example:
|
||||
|
||||
0x00007fbe2a6a9044 d299d200 mov x0, #0xce90
|
||||
|
||||
The first field is the address of the instruction, with exactly 16 hexadecimal
|
||||
characters and a leading 0x, and is followed by two spaces. The second field is
|
||||
the instruction encoding, with exactly eight hexadecimal characters (and no
|
||||
leading 0x). This is followed by two _tab_ characters, and the instruction
|
||||
disassembly. The following regular expression can be used to capture each field:
|
||||
|
||||
(0x[0-9a-f]{16})  ([0-9a-f]{8})\t\t(.*)
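As a sketch (not part of VIXL), a C++ parser could capture the fields like
this; note that the backslashes must be doubled in a string literal:

    #include <regex>
    #include <string>

    // Fills the three fields if `line` is an instruction line. State-update
    // lines (which start with '#') simply fail to match.
    bool ParseInstructionLine(const std::string& line,
                              std::string* address,
                              std::string* encoding,
                              std::string* disasm) {
      static const std::regex kInstruction(
          "(0x[0-9a-f]{16})  ([0-9a-f]{8})\\t\\t(.*)");
      std::smatch match;
      if (!std::regex_match(line, match, kInstruction)) return false;
      *address = match[1];
      *encoding = match[2];
      *disasm = match[3];
      return true;
    }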
|
||||
|
||||
Following each instruction are zero or more lines of state update. Most notably,
|
||||
these represent the register state updates and memory accesses that occurred
|
||||
during simulation of the instruction. All of these lines begin with a '#'
|
||||
character, so that they can be easily identified, and filtered if necessary. For
|
||||
example:
|
||||
|
||||
0x00007fd2221c907c 8b82200e add x14, x0, x2, asr #8
|
||||
# x14: 0xfffedcba98765432
|
||||
0x00007fd2221c9080 0b81200f add w15, w0, w1, asr #8
|
||||
# w15: 0xff89abcd
|
||||
|
||||
Note that the Simulator uses these state update lines to describe its initial
|
||||
state. As a result, there will be state trace output before the first simulated
|
||||
instruction, and parsers need to be tolerant of this.
|
||||
|
||||
Note that padding white space is used liberally to keep values vertically
|
||||
aligned throughout the trace (as shown with the write to `w15` in the example
|
||||
above). Similarly, some compound values are split into parts using the C++14
|
||||
literal separator (`'`) character. Refer to the "Memory accesses" section
|
||||
(below) for examples.
|
||||
|
||||
Ordering
|
||||
--------
|
||||
|
||||
VIXL guarantees that each instruction is printed before its associated state
|
||||
trace.
|
||||
|
||||
State trace must be interpreted sequentially, line by line. VIXL avoids tracing
more than one update to the same register for a single instruction (because it
makes the trace hard for humans to read), but this can occur in some
situations, and parsers should support it.
|
||||
|
||||
The state is intended to be consistent with architectural execution at the start
|
||||
of each instruction and at the end of the whole trace, but no such guarantees
|
||||
are made about the traced state _between_ instructions. VIXL prioritises
|
||||
human-readability when choosing the ordering of state updates.
|
||||
|
||||
If simulated registers are modified externally, for example using
|
||||
`WriteRegister` from C++ code, their state will (by default) be logged
|
||||
immediately. In the full trace, it will appear as though the (runtime) call or
|
||||
return instruction modified the state. This is consistent with the guarantees
|
||||
above, but it can result in single instructions appearing to generate a large
|
||||
number of state updates.
|
||||
|
||||
There is no upper limit on the number of state update lines that any one
|
||||
instruction can generate.
|
||||
|
||||
Whole register trace
|
||||
--------------------
|
||||
|
||||
The simplest form of state trace has the form "`REG: VALUE`", meaning that
|
||||
the register `REG` has the specified value, and any high-order bits in aliased
|
||||
registers are set to zero.
|
||||
|
||||
0x00007fd2221c907c 8b82200e add x14, x0, x2, asr #8
|
||||
# x14: 0xfffedcba98765432
|
||||
|
||||
Note that to correctly track state, parsers need to be aware of architectural
|
||||
register aliasing rules. Also, VIXL uses some standard register aliases, such as
|
||||
`lr` (`x30`). To avoid misinterpreting a register alias (and thereby potentially
|
||||
missing an aliased register update), some tools may need to treat an
|
||||
unrecognised register name as an error.
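A minimal sketch of an alias-mapping helper that a parsing tool might keep
(`lr` is the only alias this document names; a real tool would extend the
table after checking VIXL's output, and validate the result against the set
of architectural names):

    #include <map>
    #include <string>

    // Map a traced register name to its canonical architectural name. Names
    // absent from the table are returned unchanged; the caller is expected to
    // reject anything that is not a known architectural name.
    std::string CanonicalRegisterName(const std::string& name) {
      static const std::map<std::string, std::string> kAliases = {
          {"lr", "x30"}};  // The only alias this document guarantees.
      std::map<std::string, std::string>::const_iterator it =
          kAliases.find(name);
      return (it == kAliases.end()) ? name : it->second;
    }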
|
||||
|
||||
This trace format attempts to represent _architectural_ register writes.
|
||||
However, this is not strictly checked or enforced.
|
||||
|
||||
`VALUE` is always shown in hexadecimal (raw bits) form, with a leading `0x` and
|
||||
enough digits to exactly fill `REG`. `VALUE` may also include annotations (for
|
||||
example to show FP arithmetic values) in parentheses. These annotations are for
|
||||
the benefit of human readers, and parsers may ignore them.
|
||||
|
||||
Note that SVE registers _always_ use the partial register trace format,
|
||||
described below, so a plain `z` or `p` register will never be used in a whole
|
||||
register trace. This is true even if the vector length is configured to 16
|
||||
bytes.
|
||||
|
||||
Partial register trace
|
||||
----------------------
|
||||
|
||||
Sometimes, VIXL needs to show _part_ of a register without implying that the
|
||||
rest of the register is zeroed. A partial register value is indicated by a bit
|
||||
range in angled brackets after the register name: "`REG<MSB:LSB>: VALUE`".
|
||||
This format is used for stores, for example.
|
||||
|
||||
SVE register updates are split across multiple lines, and therefore always use
|
||||
the partial register trace format. For example (with a 384-bit VL):
|
||||
|
||||
0x00007fb1978da044 04214000 index z0.b, #0, #1
|
||||
# z0<383:256>: 0x2f2e2d2c2b2a29282726252423222120
|
||||
# z0<255:128>: 0x1f1e1d1c1b1a19181716151413121110
|
||||
# z0<127:0>: 0x0f0e0d0c0b0a09080706050403020100
|
||||
|
||||
Note that VIXL will omit whole lines where they are unnecessary, for example if
|
||||
they have no active (predicated) lanes. Parsers should not assume that every
|
||||
part of a register will appear in such cases.
|
||||
|
||||
The `VALUE` has the same format as in the whole register trace, except in the
|
||||
case of SVE `p` registers (as described below).
|
||||
|
||||
SVE `p` registers
|
||||
-----------------
|
||||
|
||||
For `p` registers, we try to keep the lanes vertically aligned with the
|
||||
corresponding parts of the `z` registers that they affect. To do this, we use a
|
||||
binary format, with a leading `0b`, and spaces between each digit. For example:
|
||||
|
||||
0x00007f66e539b0b8 04f54607 index z7.d, x16, #-11
|
||||
# z7<127:0>: 0x00000000000000150000000000000020
|
||||
0x00007f66e539b0bc 25d8e3a7 ptrue p7.d, all
|
||||
# p7<15:0>: 0b 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1
|
||||
|
||||
Memory accesses
|
||||
---------------
|
||||
|
||||
The pattern for a memory access is "`VALUE OP ADDRESS`", where:
|
||||
|
||||
- `VALUE` is a hexadecimal value, with visual separators (') between
|
||||
structure components,
|
||||
- `OP` is `"->"` for a store, or `"<-"` for a load,
|
||||
- `ADDRESS` is the (hexadecimal) address of the access.
|
||||
|
||||
Accesses shown in this style are always contiguous, and with little-endian
|
||||
semantics. However, a given instruction might have multiple lines of memory
|
||||
access trace, particularly if the instruction performs non-contiguous accesses.
|
||||
|
||||
In the case of simple accesses, the `VALUE` is shared with the register value trace:
|
||||
|
||||
0x00007f3835372058 e400e401 st1b { z1.b }, p1, [x0]
|
||||
# z1<127:0>: 0xd4d7dadde0e3e6e9eceff2f5f8fbfe01 -> 0x000055d170298e90
|
||||
|
||||
Sign-extending loads show the whole resulting register value, with the (smaller)
|
||||
access represented on a separate line. This makes the (differing) values in the
|
||||
register and in memory unambiguous, without parsers needing to understand the
|
||||
instruction set:
|
||||
|
||||
0x00007f47922d0068 79800306 ldrsh x6, [x24]
|
||||
# x6: 0xffffffffffff8080
|
||||
# ╙─ 0x8080 <- 0x00007fffbc197708
|
||||
|
||||
Some instructions access several different memory locations. In these cases,
|
||||
each access is given its own line, with the highest lane index first so that
|
||||
(for contiguous accesses) the lowest address ends up at the bottom:
|
||||
|
||||
0x00007fa6001e9060 e4217c0a st2b { z10.b, z11.b }, p7, [x0, x1]
|
||||
# z10<127:0>: 0x0f0e0d0c0b0a09080706050403020100
|
||||
# z11<127:0>: 0x1f1e1d1c1b1a19181716151413121110
|
||||
# ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ╙─ 0x10'00 -> 0x00007ffe485d2f90
|
||||
# ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ╙─── 0x11'01 -> 0x00007ffe485d2f92
|
||||
# ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ╙───── 0x12'02 -> 0x00007ffe485d2f94
|
||||
# ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ╙─────── 0x13'03 -> 0x00007ffe485d2f96
|
||||
# ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ╙───────── 0x14'04 -> 0x00007ffe485d2f98
|
||||
# ║ ║ ║ ║ ║ ║ ║ ║ ║ ║ ╙─────────── 0x15'05 -> 0x00007ffe485d2f9a
|
||||
# ║ ║ ║ ║ ║ ║ ║ ║ ║ ╙───────────── 0x16'06 -> 0x00007ffe485d2f9c
|
||||
# ║ ║ ║ ║ ║ ║ ║ ║ ╙─────────────── 0x17'07 -> 0x00007ffe485d2f9e
|
||||
# ║ ║ ║ ║ ║ ║ ║ ╙───────────────── 0x18'08 -> 0x00007ffe485d2fa0
|
||||
# ║ ║ ║ ║ ║ ║ ╙─────────────────── 0x19'09 -> 0x00007ffe485d2fa2
|
||||
# ║ ║ ║ ║ ║ ╙───────────────────── 0x1a'0a -> 0x00007ffe485d2fa4
|
||||
# ║ ║ ║ ║ ╙─────────────────────── 0x1b'0b -> 0x00007ffe485d2fa6
|
||||
# ║ ║ ║ ╙───────────────────────── 0x1c'0c -> 0x00007ffe485d2fa8
|
||||
# ║ ║ ╙─────────────────────────── 0x1d'0d -> 0x00007ffe485d2faa
|
||||
# ║ ╙───────────────────────────── 0x1e'0e -> 0x00007ffe485d2fac
|
||||
# ╙─────────────────────────────── 0x1f'0f -> 0x00007ffe485d2fae
|
||||
|
||||
The line-drawing characters are encoded as UTF-8 (as is this document). There is
|
||||
currently no locale handling in VIXL, so this is not configurable. However,
|
||||
since these annotations are for the benefit of human readers, parsers can safely
|
||||
ignore them, and treat the whole trace as an ASCII byte stream (ignoring 8-bit
|
||||
characters). This is useful in situations where UTF-8 handling carries an
|
||||
unacceptable performance cost.
|
||||
|
||||
In the future, VIXL may offer an option to avoid printing these annotations, so
|
||||
that the trace is restricted to single-byte characters.
|
||||
|
||||
Floating-point value annotations
|
||||
--------------------------------
|
||||
|
||||
Some floating-point operations produce register trace that annotates the raw
|
||||
values with the corresponding FP arithmetic values. This is for the benefit of
|
||||
human readers (and has limited precision). Such annotations follow the `VALUE`
|
||||
in parentheses.
|
||||
|
||||
Scalar form:
|
||||
|
||||
# s1: 0x3f800000 (1.000) <- 0x00007ffdc64d2314
|
||||
|
||||
Vector form, updating all S lanes using a load:
|
||||
|
||||
# v16: 0x1211100f0e0d0c0b0a09080706050403 (4.577e-28, 1.739e-30, 6.598e-33, 2.502e-35)
|
||||
# ║ ║ ║ ╙─ 0x06050403 <- 0x00007ffe56fd7863
|
||||
# ║ ║ ╙───────── 0x0a090807 <- 0x00007ffe56fd7867
|
||||
# ║ ╙───────────────── 0x0e0d0c0b <- 0x00007ffe56fd786b
|
||||
# ╙───────────────────────── 0x1211100f <- 0x00007ffe56fd786f
|
||||
|
||||
Vector form, updating a single S lane using a load:
|
||||
|
||||
# v2: 0x03020100040302017ff0f0027f80f000 (..., 1.540e-36, ...)
|
||||
# ╙───────────────── 0x04030201 <- 0x00007ffc7b2e3ca1
|
||||
|
||||
Vector form, replicating a single struct load to all S lanes:
|
||||
|
||||
# v15: 0x100f0e0d100f0e0d100f0e0d100f0e0d (2.821e-29, 2.821e-29, 2.821e-29, 2.821e-29)
|
||||
# v16: 0x14131211141312111413121114131211 (7.425e-27, 7.425e-27, 7.425e-27, 7.425e-27)
|
||||
# v17: 0x18171615181716151817161518171615 (1.953e-24, 1.953e-24, 1.953e-24, 1.953e-24)
|
||||
# ╙───────╨───────╨───────╨─ 0x18171615'14131211'100f0e0d <- 0x00007ffdd64d847d
|
9
externals/vixl/vixl/doc/aarch64/topics/ycm.md
vendored
Normal file
|
@ -0,0 +1,9 @@
|
|||
VIM YouCompleteMe for VIXL
|
||||
==========================
|
||||
|
||||
[YouCompleteMe](https://github.com/Valloric/YouCompleteMe) is a code completion
|
||||
engine for VIM. VIXL includes a `.ycm_extra_conf.py` to configure YCM to work in
|
||||
the VIXL repository.
|
||||
|
||||
All you need to do to get things working is to [install YCM](https://github.com/Valloric/YouCompleteMe#full-installation-guide),
|
||||
preferably with semantic completion for C-family languages.
|
124
externals/vixl/vixl/doc/changelog.md
vendored
Normal file
|
@ -0,0 +1,124 @@
|
|||
VIXL Change Log
|
||||
===============
|
||||
|
||||
* 1.13
|
||||
+ Improve code formatting and add tests using clang-format.
|
||||
+ Fix bugs in disassembly of unallocated instruction encodings.
|
||||
+ Fix some execution trace bugs, and add tests.
|
||||
+ Other small bug fixes and improvements.
|
||||
|
||||
* 1.12
|
||||
+ Bug fixes for toolchain compatibility.
|
||||
|
||||
* 1.11
|
||||
+ Fix bug in simulation of add with carry.
|
||||
+ Fix use-after-free bug in Literal handling.
|
||||
+ Build system updates for Android.
|
||||
+ Add option to run test.py under Valgrind.
|
||||
+ Other small bug fixes and improvements.
|
||||
|
||||
* 1.10
|
||||
+ Improved support for externally managed literals.
|
||||
+ Reworked build and test infrastructure.
|
||||
+ Other small bug fixes and improvements.
|
||||
|
||||
* 1.9
|
||||
+ Improved compatibility with Android build system.
|
||||
+ Improved compatibility with Clang toolchain.
|
||||
+ Added support for `umulh` instruction.
|
||||
+ Added support for `fcmpe` and `fccmpe` instructions.
|
||||
+ Other small bug fixes and improvements.
|
||||
|
||||
* 1.8
|
||||
+ Complete NEON instruction set support.
|
||||
+ Support long branches using veneers.
|
||||
+ Improved handling of literal pools.
|
||||
+ Support some `ic` and `dc` cache op instructions.
|
||||
+ Support CRC32 instructions.
|
||||
+ Support half-precision floating point instructions.
|
||||
+ MacroAssembler support for `bfm`, `ubfm` and `sbfm`.
|
||||
+ Other small bug fixes and improvements.
|
||||
|
||||
* 1.7
|
||||
+ Added support for `prfm` prefetch instructions.
|
||||
+ Added support for all `frint` instruction variants.
|
||||
+ Add support for disassembling as an offset from a given address.
|
||||
+ Fixed the disassembly of `movz` and `movn`.
|
||||
+ Provide static helpers for immediate generation.
|
||||
+ Provide helpers to create CPURegList from list unions or intersections.
|
||||
+ Improved register value tracing.
|
||||
+ Multithreading test fixes.
|
||||
+ Other small bug fixes and build system improvements.
|
||||
|
||||
* 1.6
|
||||
+ Make literal pool management the responsibility of the macro assembler.
|
||||
+ Move code buffer management out of the Assembler.
|
||||
+ Support `ldrsw` for literals.
|
||||
+ Support binding a label to a specific offset.
|
||||
+ Add macro assembler support for load/store pair with arbitrary offset.
|
||||
+ Support Peek and Poke for CPURegLists.
|
||||
+ Fix disassembly of branch targets.
|
||||
+ Fix Decoder visitor insertion order.
|
||||
+ Separate Decoder visitors into const and non-const variants.
|
||||
+ Fix simulator for branches to tagged addresses.
|
||||
+ Add a VIM YouCompleteMe configuration file.
|
||||
+ Other small bug fixes and build system improvements.
|
||||
|
||||
* 1.5
|
||||
+ Tagged pointer support.
|
||||
+ Implement support for exclusive access instructions.
|
||||
+ Implement support for `adrp` instruction.
|
||||
+ Faster code for logical immediate identification.
|
||||
+ Generate better code for immediates passed to shift-capable instructions.
|
||||
+ Allow explicit use of unscaled-offset loads and stores.
|
||||
+ Build and test infrastructure improvements.
|
||||
+ Corrected computation of cache line size.
|
||||
+ Fix simulation of `extr` instruction.
|
||||
+ Fixed a bug when moving kWMinInt to a register.
|
||||
+ Other small bug fixes.
|
||||
|
||||
* 1.4
|
||||
+ Added support for `frintm`.
|
||||
+ Fixed simulation of `frintn` and `frinta` for corner cases.
|
||||
+ Added more tests for floating point instruction simulation.
|
||||
+ Modified `CalleeSave()` and `CalleeRestore()` to push general purpose
|
||||
registers before floating point registers on the stack.
|
||||
+ Fixed Printf for mixed argument types, and use on real hardware.
|
||||
+ Improved compatibility with some 32-bit compilers.
|
||||
|
||||
* 1.3
|
||||
+ Address inaccuracies in the simulated floating point instructions.
|
||||
+ Implement Default-NaN floating point mode.
|
||||
+ Introduce `UseScratchRegisterScope` for controlling the use of temporary
|
||||
registers.
|
||||
+ Enable building VIXL on 32-bit hosts.
|
||||
+ Other small bug fixes and improvements.
|
||||
|
||||
* 1.2
|
||||
+ Added support for `fmadd`, `fnmadd`, `fnmsub`, `fminnm`, `fmaxnm`,
|
||||
`frinta`, `fcvtau` and `fcvtas`.
|
||||
+ Added support for assembling and disassembling `isb`, `dsb` and `dmb`.
|
||||
+ Added support for automatic inversion of compare instructions when using
|
||||
negative immediates.
|
||||
+ Added support for using `movn` when generating immediates.
|
||||
+ Added explicit flag-setting 'S' instructions, and removed
|
||||
`SetFlags` and `LeaveFlags` arguments.
|
||||
+ Added support for `Movk` in macro assembler.
|
||||
+ Added support for W register parameters to `Tbz` and `Tbnz`.
|
||||
+ Added support for using immediate operands with `Csel`.
|
||||
+ Added new debugger syntax for memory inspection.
|
||||
+ Fixed `smull`, `fmsub` and `sdiv` simulation.
|
||||
+ Fixed sign extension for W->X conversions using `sxtb`, `sxth` and `sxtw`.
|
||||
+ Prevented code generation for certain side-effect free operations,
|
||||
such as `add r, r, #0`, in the macro assembler.
|
||||
+ Other small bug fixes.
|
||||
|
||||
* 1.1
|
||||
+ Improved robustness of instruction decoder and disassembler.
|
||||
+ Added support for double-to-float conversions using `fcvt`.
|
||||
+ Added support for more fixed-point to floating-point conversions (`ucvtf`
|
||||
and `scvtf`).
|
||||
+ Added instruction statistics collection class `instrument-a64.cc`.
|
||||
|
||||
* 1.0
|
||||
+ Initial release.
|
71
externals/vixl/vixl/examples/aarch32/abs.cc
vendored
Normal file
|
@ -0,0 +1,71 @@
|
|||
// Copyright 2016, VIXL authors
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
// * Neither the name of ARM Limited nor the names of its contributors may be
|
||||
// used to endorse or promote products derived from this software without
|
||||
// specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
|
||||
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
#include "examples.h"
|
||||
|
||||
using namespace vixl;
|
||||
using namespace vixl::aarch32;
|
||||
|
||||
#define __ masm->
|
||||
|
||||
void GenerateAbs(MacroAssembler* masm) {
|
||||
// int32_t abs(int32_t x)
|
||||
// Argument location:
|
||||
// x -> r0
|
||||
|
||||
__ Cmp(r0, 0);
|
||||
// If r0 is negative, negate r0.
|
||||
__ Rsb(mi, r0, r0, 0);
|
||||
__ Bx(lr);
|
||||
}
|
||||
|
||||
|
||||
#ifndef TEST_EXAMPLES
|
||||
int main() {
|
||||
MacroAssembler masm(A32);
|
||||
// Generate the code for the example function.
|
||||
Label abs;
|
||||
masm.Bind(&abs);
|
||||
GenerateAbs(&masm);
|
||||
masm.FinalizeCode();
|
||||
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH32
|
||||
// There is no simulator defined for VIXL AArch32.
|
||||
printf("This example cannot be simulated\n");
|
||||
#else
|
||||
byte* code = masm.GetBuffer()->GetStartAddress<byte*>();
|
||||
uint32_t code_size = masm.GetSizeOfCodeGenerated();
|
||||
ExecutableMemory memory(code, code_size);
|
||||
// Run the example function.
|
||||
int32_t (*abs_function)(int32_t) =
|
||||
memory.GetEntryPoint<int32_t (*)(int32_t)>(abs,
|
||||
masm.GetInstructionSetInUse());
|
||||
int32_t input_value = -42;
|
||||
int32_t output_value = (*abs_function)(input_value);
|
||||
printf("native: abs(%d) = %d\n", input_value, output_value);
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
#endif // TEST_EXAMPLES
|
151
externals/vixl/vixl/examples/aarch32/custom-aarch32-disasm.cc
vendored
Normal file
|
@ -0,0 +1,151 @@
|
|||
// Copyright 2017, VIXL authors
|
||||
// All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright notice,
|
||||
// this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above copyright notice,
|
||||
// this list of conditions and the following disclaimer in the documentation
|
||||
// and/or other materials provided with the distribution.
|
||||
// * Neither the name of ARM Limited nor the names of its contributors may be
|
||||
// used to endorse or promote products derived from this software without
|
||||
// specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
|
||||
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
#include <iostream>
|
||||
#include <map>
|
||||
#include <string>
|
||||
|
||||
#include "aarch32/constants-aarch32.h"
|
||||
#include "aarch32/disasm-aarch32.h"
|
||||
#include "aarch32/instructions-aarch32.h"
|
||||
#include "aarch32/macro-assembler-aarch32.h"
|
||||
|
||||
#define __ masm.
|
||||
|
||||
namespace vixl {
|
||||
namespace aarch32 {
|
||||
|
||||
// Example of disassembly customization.
|
||||
|
||||
class CustomStream : public Disassembler::DisassemblerStream {
|
||||
std::map<Location::Offset, const char*> symbols_;
|
||||
|
||||
public:
|
||||
CustomStream() : Disassembler::DisassemblerStream(std::cout) {}
|
||||
std::map<Location::Offset, const char*>& GetSymbols() { return symbols_; }
|
||||
virtual DisassemblerStream& operator<<(const Disassembler::PrintLabel& label)
|
||||
VIXL_OVERRIDE {
|
||||
std::map<Location::Offset, const char*>::iterator symbol =
|
||||
symbols_.find(label.GetLocation());
|
||||
// If the label was named, print the name instead of the address.
|
||||
if (symbol != symbols_.end()) {
|
||||
os() << symbol->second;
|
||||
return *this;
|
||||
}
|
||||
os() << label;
|
||||
return *this;
|
||||
}
|
||||
virtual DisassemblerStream& operator<<(const Register reg) VIXL_OVERRIDE {
|
||||
// Print all the core registers with an upper-case letter instead of the
|
||||
// default lower-case.
|
||||
os() << "R" << reg.GetCode();
|
||||
return *this;
|
||||
}
|
||||
};
|
||||
|
||||
class CustomDisassembler : public PrintDisassembler {
|
||||
public:
|
||||
explicit CustomDisassembler(CustomStream* stream)
|
||||
: PrintDisassembler(stream) {}
|
||||
CustomStream* GetStream() const {
|
||||
return reinterpret_cast<CustomStream*>(&os());
|
||||
}
|
||||
virtual void PrintCodeAddress(uint32_t pc) VIXL_OVERRIDE {
|
||||
// If the address matches a label, then print the label. Otherwise, print
|
||||
// nothing.
|
||||
std::map<Location::Offset, const char*>::iterator symbol =
|
||||
GetStream()->GetSymbols().find(pc);
|
||||
if (symbol != GetStream()->GetSymbols().end()) {
|
||||
os().os() << symbol->second << ":" << std::endl;
|
||||
}
|
||||
// Add indentation for instructions.
|
||||
os() << " ";
|
||||
}
|
||||
virtual void PrintOpcode16(uint32_t opcode) VIXL_OVERRIDE {
|
||||
// Do not print opcodes.
|
||||
USE(opcode);
|
||||
}
|
||||
virtual void PrintOpcode32(uint32_t opcode) VIXL_OVERRIDE {
|
||||
// Do not print opcodes.
|
||||
USE(opcode);
|
||||
}
|
||||
};
|
||||
|
||||
class NamedLabel : public Label {
|
||||
CustomStream* stream_;
|
||||
const char* name_;
|
||||
|
||||
public:
|
||||
NamedLabel(CustomStream* stream, const char* name)
|
||||
: stream_(stream), name_(name) {}
|
||||
~NamedLabel() {
|
||||
if (IsBound()) {
|
||||
stream_->GetSymbols().insert(
|
||||
std::pair<Location::Offset, const char*>(GetLocation(), name_));
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
void RunCustomDisassemblerTest() {
|
||||
CustomStream stream;
|
||||
MacroAssembler masm;
|
||||
{
|
||||
NamedLabel loop(&stream, "loop");
|
||||
NamedLabel end(&stream, "end");
|
||||
__ Mov(r0, 0);
|
||||
__ Mov(r1, 0);
|
||||
__ Bind(&loop);
|
||||
__ Cmp(r1, 20);
|
||||
__ B(gt, &end);
|
||||
__ Add(r0, r0, r1);
|
||||
__ Add(r1, r1, 1);
|
||||
__ B(&loop);
|
||||
__ Bind(&end);
|
||||
__ Bx(lr);
|
||||
__ FinalizeCode();
|
||||
}
|
||||
std::cout << "Custom disassembly:" << std::endl;
|
||||
CustomDisassembler custom_disassembler(&stream);
|
||||
custom_disassembler
|
||||
.DisassembleA32Buffer(masm.GetBuffer()->GetOffsetAddress<uint32_t*>(0),
|
||||
masm.GetBuffer()->GetSizeInBytes());
|
||||
std::cout << std::endl;
|
||||
std::cout << "Standard disassembly:" << std::endl;
|
||||
PrintDisassembler print_disassembler(std::cout);
|
||||
print_disassembler
|
||||
.DisassembleA32Buffer(masm.GetBuffer()->GetOffsetAddress<uint32_t*>(0),
|
||||
masm.GetBuffer()->GetSizeInBytes());
|
||||
}
|
||||
|
||||
} // namespace aarch32
|
||||
} // namespace vixl
|
||||
|
||||
#ifndef TEST_EXAMPLES
|
||||
int main() {
|
||||
vixl::aarch32::RunCustomDisassemblerTest();
|
||||
return 0;
|
||||
}
|
||||
#endif // TEST_EXAMPLES
|
325
externals/vixl/vixl/examples/aarch32/disasm-a32.cc
vendored
Normal file
|
@ -0,0 +1,325 @@
|
|||
// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


// The example assumes support for ELF binaries.
#ifdef __linux__

extern "C" {
#include <elf.h>
#include <fcntl.h>
#include <stdint.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
}

#include <cerrno>
#include <iostream>
#include <map>
#include <string>

#include "globals-vixl.h"
#include "aarch32/disasm-aarch32.h"
#include "aarch32/instructions-aarch32.h"

class Symbol {
  Elf32_Addr addr_;
  int32_t offset_;
  uint32_t size_;
  int section_;
  std::string name_;

 public:
  Symbol(const char* name,
         Elf32_Addr addr,
         int32_t offset,
         uint32_t size,
         int section)
      : addr_(addr),
        offset_(offset),
        size_(size),
        section_(section),
        name_(name) {}
  Symbol(const Symbol& ref)
      : addr_(ref.addr_),
        offset_(ref.offset_),
        size_(ref.size_),
        section_(ref.section_),
        name_(ref.name_) {}

  Elf32_Addr GetAddress() const { return addr_; }
  // Bit 0 of an ARM ELF symbol address marks a Thumb symbol; clear it to get
  // the real memory address.
  Elf32_Addr GetMemoryAddress() const { return (addr_ & ~1) + offset_; }
  uint32_t GetSize() const { return size_; }
  const std::string& GetName() const { return name_; }
  int GetSection() const { return section_; }
};


class SymbolTable : public std::map<Elf32_Addr, Symbol> {
 public:
  void insert(const Symbol& sym) {
    VIXL_ASSERT(find(sym.GetAddress()) == end());
    std::map<Elf32_Addr, Symbol>::insert(
        std::make_pair(sym.GetMemoryAddress(), sym));
  }
};


class SectionLocator {
  const Elf32_Shdr* shdr_;
  int nsections_;
  const char* shstrtab_;

 public:
  explicit SectionLocator(const Elf32_Ehdr* ehdr) {
    shdr_ = reinterpret_cast<const Elf32_Shdr*>(
        reinterpret_cast<const char*>(ehdr) + ehdr->e_shoff);
    // shstrtab holds the section names as an offset in the file.
    shstrtab_ =
        reinterpret_cast<const char*>(ehdr) + shdr_[ehdr->e_shstrndx].sh_offset;
    nsections_ = ehdr->e_shnum;
  }

  const Elf32_Shdr* Locate(Elf32_Word type,
                           const std::string& section_name) const {
    for (int shnum = 1; shnum < nsections_; shnum++) {
      if ((shdr_[shnum].sh_type == type) &&
          std::string(shstrtab_ + shdr_[shnum].sh_name) == section_name) {
        return &shdr_[shnum];
      }
    }
    return NULL;
  }
};


template <typename VISITOR>
void LocateSymbols(const Elf32_Ehdr* ehdr,
                   const Elf32_Shdr* symtab,
                   const Elf32_Shdr* strtab,
                   VISITOR* visitor) {
  if ((symtab != NULL) && (strtab != NULL)) {
    const Elf32_Shdr* shdr = reinterpret_cast<const Elf32_Shdr*>(
        reinterpret_cast<const char*>(ehdr) + ehdr->e_shoff);

    const char* symnames =
        reinterpret_cast<const char*>(ehdr) + strtab->sh_offset;
    VIXL_CHECK(symnames != NULL);

    int nsym = symtab->sh_size / symtab->sh_entsize;
    const Elf32_Sym* sym = reinterpret_cast<const Elf32_Sym*>(
        reinterpret_cast<const char*>(ehdr) + symtab->sh_offset);
    for (int snum = 0; snum < nsym; snum++) {
      if ((sym[snum].st_shndx > 0) && (sym[snum].st_shndx < ehdr->e_shnum) &&
          (sym[snum].st_value != 0) &&
          (shdr[sym[snum].st_shndx].sh_type == SHT_PROGBITS) &&
          ((ELF32_ST_BIND(sym[snum].st_info) == STB_LOCAL) ||
           (ELF32_ST_BIND(sym[snum].st_info) == STB_GLOBAL)) &&
          (ELF32_ST_TYPE(sym[snum].st_info) == STT_FUNC)) {
        visitor->visit(symnames + sym[snum].st_name, sym[snum]);
      }
    }
  }
}


class DynamicSymbolVisitor {
  SymbolTable* symbols_;

 public:
  explicit DynamicSymbolVisitor(SymbolTable* symbols) : symbols_(symbols) {}
  void visit(const char* symname, const Elf32_Sym& sym) {
    symbols_->insert(
        Symbol(symname, sym.st_value, 0, sym.st_size, sym.st_shndx));
  }
};


class StaticSymbolVisitor {
  const Elf32_Ehdr* ehdr_;
  const Elf32_Shdr* shdr_;
  SymbolTable* symbols_;

 public:
  StaticSymbolVisitor(const Elf32_Ehdr* ehdr, SymbolTable* symbols)
      : ehdr_(ehdr),
        shdr_(reinterpret_cast<const Elf32_Shdr*>(
            reinterpret_cast<const char*>(ehdr) + ehdr->e_shoff)),
        symbols_(symbols) {}
  void visit(const char* symname, const Elf32_Sym& sym) {
    if (ehdr_->e_type == ET_REL) {
      symbols_->insert(Symbol(symname,
                              sym.st_value,
                              shdr_[sym.st_shndx].sh_offset,
                              sym.st_size,
                              sym.st_shndx));
    } else {
      symbols_->insert(
          Symbol(symname,
                 sym.st_value,
                 shdr_[sym.st_shndx].sh_offset - shdr_[sym.st_shndx].sh_addr,
                 sym.st_size,
                 sym.st_shndx));
    }
  }
};


void usage() {
  std::cout << "usage: disasm-a32 <file>\n"
               "where <file> is an ARM ELF binary file, either an executable, "
               "a shared object, or an object file."
            << std::endl;
}


int main(int argc, char** argv) {
  const int kErrorNotARMELF32 = -1;
  const int kErrorArguments = -2;
  if (argc < 2) {
    usage();
    return kErrorArguments;
  }

  const char* filename = argv[1];
  struct stat sb;


  if (lstat(filename, &sb) == -1) {
    std::cerr << "Cannot stat this file: " << filename << std::endl;
    return errno;
  }

  if (S_ISLNK(sb.st_mode)) {
    static char linkname[4096];
    filename = realpath(argv[1], linkname);
    if (lstat(linkname, &sb) == -1) {
      std::cerr << "Cannot stat this file: " << linkname << std::endl;
      return errno;
    }
  }

  int elf_in;
  if ((elf_in = open(filename, O_RDONLY)) < 0) {
    std::cerr << "Cannot open: " << argv[1];
    if (filename != argv[1]) std::cerr << " aka " << filename;
    std::cerr << std::endl;
    return errno;
  }

  char* base_addr;
  VIXL_CHECK((base_addr = reinterpret_cast<char*>(
                  mmap(0, sb.st_size, PROT_READ, MAP_PRIVATE, elf_in, 0))) !=
             0);

  const Elf32_Ehdr* ehdr = reinterpret_cast<const Elf32_Ehdr*>(base_addr);
  if ((ehdr->e_ident[0] != 0x7f) || (ehdr->e_ident[1] != 'E') ||
      (ehdr->e_ident[2] != 'L') || (ehdr->e_ident[3] != 'F') ||
      (ehdr->e_ehsize != sizeof(Elf32_Ehdr))) {
    std::cerr << "This file is not a 32-bit ELF file." << std::endl;
    munmap(base_addr, sb.st_size);
    return kErrorNotARMELF32;
  }

  if (ehdr->e_machine != EM_ARM) {
    std::cerr << "This file is not using the ARM ISA." << std::endl;
    munmap(base_addr, sb.st_size);
    return kErrorNotARMELF32;
  }

  // shstrtab holds the section names as an offset in the file.
  const Elf32_Shdr* shdr =
      reinterpret_cast<const Elf32_Shdr*>(base_addr + ehdr->e_shoff);

  SectionLocator section_locator(ehdr);

  SymbolTable symbol_names;

  // Traverse the dynamic symbols defined in any text section.
  DynamicSymbolVisitor dynamic_visitor(&symbol_names);
  LocateSymbols(ehdr,
                section_locator.Locate(SHT_DYNSYM, ".dynsym"),
                section_locator.Locate(SHT_STRTAB, ".dynstr"),
                &dynamic_visitor);

  // Traverse the static symbols defined in any text section.
  StaticSymbolVisitor static_visitor(ehdr, &symbol_names);
  LocateSymbols(ehdr,
                section_locator.Locate(SHT_SYMTAB, ".symtab"),
                section_locator.Locate(SHT_STRTAB, ".strtab"),
                &static_visitor);


  vixl::aarch32::PrintDisassembler dis(std::cout, 0);
  for (SymbolTable::iterator sres = symbol_names.begin();
       sres != symbol_names.end();
       sres++) {
    const Symbol& symbol = sres->second;
    uint32_t func_addr = symbol.GetAddress();
    uint32_t func_size = symbol.GetSize();
    // Some symbols have no recorded size; infer one from the next symbol's
    // address, or from the end of the section for the last symbol.
    if (func_size == 0) {
      SymbolTable::iterator next_func = sres;
      next_func++;
      if (next_func == symbol_names.end()) {
        const Elf32_Shdr& shndx = shdr[sres->second.GetSection()];
        func_size = (shndx.sh_offset + shndx.sh_size) - sres->first;
      } else {
        func_size = next_func->first - sres->first;
      }
    }

    std::cout << "--- " << symbol.GetName() << ":" << std::endl;
    if ((func_addr & 1) == 1) {
      // Bit 0 set: a Thumb symbol, so disassemble as T32.
      func_addr &= ~1;
      dis.SetCodeAddress(func_addr);
      dis.DisassembleT32Buffer(reinterpret_cast<uint16_t*>(
                                   base_addr + symbol.GetMemoryAddress()),
                               func_size);
    } else {
      dis.SetCodeAddress(func_addr);
      dis.DisassembleA32Buffer(reinterpret_cast<uint32_t*>(
                                   base_addr + symbol.GetMemoryAddress()),
                               func_size);
    }
  }
  munmap(base_addr, sb.st_size);
  return 0;
}


#else

#include "globals-vixl.h"

// TODO: Implement this example for macOS.
int main(void) {
  VIXL_WARNING("This example has not been implemented for macOS.");
  return 0;
}

#endif // __linux__

96
externals/vixl/vixl/examples/aarch32/examples.h
vendored
Normal file

@@ -0,0 +1,96 @@
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_EXAMPLE_EXAMPLES_H_
#define VIXL_EXAMPLE_EXAMPLES_H_

extern "C" {
#include <stdint.h>
#ifndef VIXL_INCLUDE_SIMULATOR_AARCH32
#include <sys/mman.h>
#endif
}

#include <cstdio>
#include <string>

#include "aarch32/constants-aarch32.h"
#include "aarch32/instructions-aarch32.h"
#include "aarch32/macro-assembler-aarch32.h"

#ifndef VIXL_INCLUDE_SIMULATOR_AARCH32
class ExecutableMemory {
 public:
  ExecutableMemory(const byte* code_start, size_t size)
      : size_(size),
        buffer_(reinterpret_cast<byte*>(mmap(NULL,
                                             size,
                                             PROT_READ | PROT_WRITE | PROT_EXEC,
                                             MAP_SHARED | MAP_ANONYMOUS,
                                             -1,
                                             0))) {
    VIXL_ASSERT(reinterpret_cast<intptr_t>(buffer_) != -1);
    memcpy(buffer_, code_start, size_);
    __builtin___clear_cache(buffer_, buffer_ + size_);
  }
  ~ExecutableMemory() { munmap(buffer_, size_); }

  template <typename T>
  T GetEntryPoint(const Label& entry_point, InstructionSet isa) const {
    int32_t location = entry_point.GetLocation();
    // A T32 entry point needs bit 0 set so that a branch to it stays in
    // Thumb state.
    if (isa == T32) location += 1;
    return GetOffsetAddress<T>(location);
  }

 protected:
  template <typename T>
  T GetOffsetAddress(int32_t offset) const {
    VIXL_ASSERT((offset >= 0) && (static_cast<size_t>(offset) <= size_));
    T function_address;
    byte* buffer_address = buffer_ + offset;
    memcpy(&function_address, &buffer_address, sizeof(T));
    return function_address;
  }

 private:
  size_t size_;
  byte* buffer_;
};
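
// Typical usage, as in the examples below (an illustrative sketch of the
// pattern, not additional API):
//   ExecutableMemory memory(code, code_size);
//   uint32_t (*fn)(uint32_t) =
//       memory.GetEntryPoint<uint32_t (*)(uint32_t)>(
//           entry_label, masm.GetInstructionSetInUse());
//   uint32_t result = fn(42);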
#endif

// Generate a function with the following prototype:
//   int32_t abs(int32_t x)
//
// The generated function computes the absolute value of an integer.
void GenerateAbs(vixl::aarch32::MacroAssembler* masm);

// Generate a function with the following prototype:
//   uint32_t demo_function(uint32_t x)
//
// This is the example used in doc/getting-started-aarch32.md
void GenerateDemo(vixl::aarch32::MacroAssembler* masm);

#endif // VIXL_EXAMPLE_EXAMPLES_H_

73
externals/vixl/vixl/examples/aarch32/getting-started.cc
vendored
Normal file

@@ -0,0 +1,73 @@
// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "examples.h"

using namespace vixl;
using namespace vixl::aarch32;

#define __ masm->

void GenerateDemo(MacroAssembler* masm) {
  // uint32_t demo(uint32_t x)

  // Load a constant in r1 using the literal pool.
  __ Ldr(r1, 0x12345678);
  __ And(r0, r0, r1);
  __ Bx(lr);
}
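
// For reference, the generated function is equivalent to this C++ (an
// illustrative sketch, not part of the original example):
static uint32_t DemoReference(uint32_t x) { return x & 0x12345678; }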


#ifndef TEST_EXAMPLES
int main() {
  MacroAssembler masm;
  // Generate the code for the example function.
  Label demo;
  // Tell the macro assembler that the label "demo" refers to the current
  // location in the buffer.
  masm.Bind(&demo);
  GenerateDemo(&masm);
  // Ensure that everything is generated and that the generated buffer is
  // ready to use.
  masm.FinalizeCode();
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH32
  // There is no simulator defined for VIXL AArch32.
#else
  byte* code = masm.GetBuffer()->GetStartAddress<byte*>();
  uint32_t code_size = masm.GetSizeOfCodeGenerated();
  ExecutableMemory memory(code, code_size);
  // Run the example function.
  uint32_t (*demo_function)(uint32_t) =
      memory
          .GetEntryPoint<uint32_t (*)(uint32_t)>(demo,
                                                 masm.GetInstructionSetInUse());
  uint32_t input_value = 0x89abcdef;
  uint32_t output_value = (*demo_function)(input_value);
  printf("native: demo(0x%08x) = 0x%08x\n", input_value, output_value);
#endif
  return 0;
}
#endif // TEST_EXAMPLES

222
externals/vixl/vixl/examples/aarch32/mandelbrot.cc
vendored
Normal file

@@ -0,0 +1,222 @@
// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "examples.h"

using namespace vixl;
using namespace vixl::aarch32;

#define __ masm->

void GenerateMandelBrot(MacroAssembler* masm) {
  const QRegister kCReal = q0;
  const QRegister kCImag = q1;

  const QRegister kCRealStep = q13;
  const QRegister kCImagStep = q14;

  const QRegister kModSqLimit = q15;

  // Save register values.
  __ Push(RegisterList(r4, r5, r6));

  __ Vmov(F32, kCRealStep, 0.125);
  __ Vmov(F32, kCImagStep, 0.0625);

  const Register kZero = r2;
  __ Mov(kZero, 0);

  const DRegister kStars = d6;
  const DRegister kSpaces = d7;
  // Output characters - packed 4 characters into 32 bits.
  __ Vmov(I8, kStars, '*');
  __ Vmov(I8, kSpaces, ' ');

  const DRegisterLane kNegTwo = DRegisterLane(d7, 1);
  __ Vmov(s15, -2.0);

  // Imaginary part of c.
  __ Vdup(Untyped32, kCImag, kNegTwo);

  // Max modulus squared.
  __ Vmov(F32, kModSqLimit, 4.0);

  // Height of output in characters.
  __ Mov(r4, 64);

  // String length will be 129, so need 132 bytes of space.
  const uint32_t kStringLength = 132;

  // Make space for our string.
  __ Sub(sp, sp, kStringLength);

  // Set up a starting pointer for the string.
  const Register kStringPtr = r6;
  __ Mov(kStringPtr, sp);

  // Loop over imaginary values of c from -2 to 2, taking
  // 64 equally spaced values in the range.
  {
    Label c_imag_loop;

    __ Bind(&c_imag_loop);

    // Real part of c.
    // Store 4 equally spaced values in q0 (kCReal) to use SIMD.
    __ Vmov(s0, -2.0);
    __ Vmov(s1, -1.96875);
    __ Vmov(s2, -1.9375);
    __ Vmov(s3, -1.90625);

    // Width of output in terms of sets of 4 characters - twice that
    // of height to compensate for ratio of character height to width.
    __ Mov(r5, 32);

    const Register kWriteCursor = r3;
    // Set a cursor ready to write the next line.
    __ Mov(kWriteCursor, kStringPtr);

    // Loop over real values of c from -2 to 2, processing
    // 4 different values simultaneously using SIMD.
    {
      const QRegister kFlags = q2;
      const DRegister kLowerFlags = d4;

      Label c_real_loop;
      __ Bind(&c_real_loop);

      // Get number of iterations.
      __ Add(r1, r0, 1);

      // Perform the iterations of z(n+1) = zn^2 + c using SIMD.
      // If the result is that c is in the set, the element of
      // kFlags will be 0, else ~0.
      {
        const QRegister kZReal = q8;
        const QRegister kZImag = q9;

        // Real part of z.
        __ Vmov(F32, kZReal, 0.0);

        // Imaginary part of z.
        __ Vmov(F32, kZImag, 0.0);

        __ Vmov(F32, kFlags, 0.0);

        Label iterative_formula_start, iterative_formula_end;
        __ Bind(&iterative_formula_start);
        __ Subs(r1, r1, 1);
        __ B(le, &iterative_formula_end);

        // z(n+1) = zn^2 + c.
        // re(z(n+1)) = re(c) + re(zn)^2 - im(zn)^2.
        // im(z(n+1)) = im(c) + 2 * re(zn) * im(zn)

        __ Vmul(F32, q10, kZReal, kZImag); // re(zn) * im(zn)

        __ Vmul(F32, kZReal, kZReal, kZReal); // re(zn)^2
        __ Vadd(F32, kZReal, kCReal, kZReal); // re(c) + re(zn)^2
        __ Vmls(F32, kZReal, kZImag, kZImag); // re(c) + re(zn)^2 - im(zn)^2

        __ Vmov(F32, kZImag, kCImag); // im(c)
        __ Vmls(F32, kZImag, q10, kNegTwo); // im(c) + 2 * re(zn) * im(zn)

        __ Vmul(F32, q10, kZReal, kZReal); // re(z(n+1))^2
        __ Vmla(F32, q10, kZImag, kZImag); // re(z(n+1))^2 + im(z(n+1))^2
        __ Vcgt(F32, q10, q10, kModSqLimit); // |z(n+1)|^2 > 4 ? ~0 : 0
        __ Vorr(F32, kFlags, kFlags, q10); // (~0/0) | above result

        __ B(&iterative_formula_start);
        __ Bind(&iterative_formula_end);
      }

      // Narrow twice so that each mask is 8 bits, packed into
      // a single 32 bit register s4.
      // kLowerFlags is the lower half of kFlags, so the second narrow will
      // be working on the results of the first to halve the size of each
      // representation again.
      __ Vmovn(I32, kLowerFlags, kFlags);
      __ Vmovn(I16, kLowerFlags, kFlags);

      // '*' if in set, ' ' if not.
      __ Vbsl(Untyped32, kLowerFlags, kSpaces, kStars);

      // Add this to the string.
      __ Vst1(Untyped32,
              NeonRegisterList(kLowerFlags, 0),
              AlignedMemOperand(kWriteCursor, k32BitAlign, PostIndex));

      // Increase real part of c.
      __ Vadd(F32, kCReal, kCReal, kCRealStep);

      __ Subs(r5, r5, 1);
      __ B(ne, &c_real_loop);
    }

    // Put terminating character.
    __ Strb(kZero, MemOperand(kWriteCursor));

    // Print the string.
    __ Printf("%s\n", kStringPtr);

    // Increase imaginary part of c.
    __ Vadd(F32, kCImag, kCImag, kCImagStep);

    __ Subs(r4, r4, 1);
    __ B(ne, &c_imag_loop);
  }
  // Restore stack pointer.
  __ Add(sp, sp, kStringLength);
  // Restore register values.
  __ Pop(RegisterList(r4, r5, r6));
  __ Bx(lr);
}
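
// For reference, a scalar C++ model of the per-point escape test that the
// SIMD loop above evaluates for four real values of c at once (an
// illustrative sketch, not part of the original example):
static char MandelbrotCharReference(float c_re, float c_im, int iterations) {
  float z_re = 0.0f, z_im = 0.0f;
  bool escaped = false;
  for (int i = 0; i < iterations; i++) {
    float t = c_re + z_re * z_re - z_im * z_im; // re(c) + re(z)^2 - im(z)^2
    z_im = c_im + 2.0f * z_re * z_im;           // im(c) + 2*re(z)*im(z)
    z_re = t;
    if (z_re * z_re + z_im * z_im > 4.0f) escaped = true;
  }
  return escaped ? ' ' : '*';
}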

#ifndef TEST_EXAMPLES
int main() {
  MacroAssembler masm;
  // Generate the code for the example function.
  Label mandelbrot;
  masm.Bind(&mandelbrot);
  GenerateMandelBrot(&masm);
  masm.FinalizeCode();
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH32
  // There is no simulator defined for VIXL AArch32.
  printf("This example cannot be simulated\n");
#else
  byte* code = masm.GetBuffer()->GetStartAddress<byte*>();
  uint32_t code_size = masm.GetSizeOfCodeGenerated();
  ExecutableMemory memory(code, code_size);
  // Run the example function.
  double (*mandelbrot_func)(uint32_t) =
      memory.GetEntryPoint<double (*)(uint32_t)>(mandelbrot,
                                                 masm.GetInstructionSetInUse());
  uint32_t iterations = 1000;
  (*mandelbrot_func)(iterations);
#endif
  return 0;
}
#endif // TEST_EXAMPLES

110
externals/vixl/vixl/examples/aarch32/pi.cc
vendored
Normal file

@@ -0,0 +1,110 @@
// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "examples.h"

using namespace vixl;
using namespace vixl::aarch32;

#define __ masm->

void GenerateApproximatePi(MacroAssembler* masm) {
  // double ApproximatePi(uint32_t iterations)
  // Very rough approximation of pi:
  //   pi/4 = 1 - 1/3 + 1/5 - 1/7 + ... + (-1)^n / (2n + 1)
  __ Cmp(r0, 0);
  __ Bx(eq, lr);
  __ Vpush(Untyped64, DRegisterList(d8, 8));
  __ Vldr(d0, 1.0);
  __ Vldr(d1, 3.0);
  __ Vldr(d2, 5.0);
  __ Vldr(d3, 7.0);


  __ Vmov(d4, 8.0);
  __ Vmov(d5, 1.0);

  __ Vmov(I64, d10, 0); // d10 = 0.0;
  __ Vmov(I64, d11, 0); // d11 = 0.0;
  __ Vmov(I64, d12, 0); // d12 = 0.0;
  __ Vmov(I64, d13, 0); // d13 = 0.0;

  Label loop;
  __ Bind(&loop);

  __ Vdiv(F64, d6, d5, d0);
  __ Vdiv(F64, d7, d5, d1);
  __ Vdiv(F64, d8, d5, d2);
  __ Vdiv(F64, d9, d5, d3);

  __ Vadd(F64, d10, d10, d6);
  __ Vadd(F64, d11, d11, d7);
  __ Vadd(F64, d12, d12, d8);
  __ Vadd(F64, d13, d13, d9);

  __ Vadd(F64, d0, d0, d4);
  __ Vadd(F64, d1, d1, d4);
  __ Vadd(F64, d2, d2, d4);
  __ Vadd(F64, d3, d3, d4);

  __ Subs(r0, r0, 1);
  __ B(ne, &loop);

  __ Vmov(F64, d4, 4.0);
  __ Vadd(F64, d10, d10, d12);
  __ Vadd(F64, d11, d11, d13);
  __ Vsub(F64, d10, d10, d11);
  __ Vmul(F64, d0, d10, d4);
  __ Vpop(Untyped64, DRegisterList(d8, 8));
  __ Bx(lr);
}
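
// For reference, a plain C++ model of the generated function (an
// illustrative sketch, not part of the original example). It mirrors the
// four-way unrolling above: d10/d12 accumulate the positive terms and
// d11/d13 the negative ones.
static double ApproximatePiReference(uint32_t iterations) {
  double pos = 0.0, neg = 0.0, denom = 1.0;
  for (uint32_t i = 0; i < iterations; i++) {
    pos += 1.0 / denom + 1.0 / (denom + 4.0);         // 1/1 + 1/5, 1/9 + ...
    neg += 1.0 / (denom + 2.0) + 1.0 / (denom + 6.0); // 1/3 + 1/7, 1/11 + ...
    denom += 8.0;
  }
  return (pos - neg) * 4.0;
}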

#ifndef TEST_EXAMPLES
int main() {
  MacroAssembler masm;
  // Generate the code for the example function.
  Label pi_approx;
  masm.Bind(&pi_approx);
  GenerateApproximatePi(&masm);
  masm.FinalizeCode();
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH32
  // There is no simulator defined for VIXL AArch32.
  printf("This example cannot be simulated\n");
#else
  byte* code = masm.GetBuffer()->GetStartAddress<byte*>();
  uint32_t code_size = masm.GetSizeOfCodeGenerated();
  ExecutableMemory memory(code, code_size);
  // Run the example function.
  double (*pi_function)(uint32_t) =
      memory.GetEntryPoint<double (*)(uint32_t)>(pi_approx,
                                                 masm.GetInstructionSetInUse());
  uint32_t repeat = 10000000;
  double output_value = (*pi_function)(repeat);
  printf("native: pi_approx(%u) = %3.10f\n", repeat, output_value);
#endif
  return 0;
}
#endif // TEST_EXAMPLES

74
externals/vixl/vixl/examples/aarch64/abs.cc
vendored
Normal file

@@ -0,0 +1,74 @@
// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "examples.h"

using namespace vixl;
using namespace vixl::aarch64;

#define __ masm->

void GenerateAbs(MacroAssembler* masm) {
  // int64_t abs(int64_t x)
  // Argument location:
  //   x -> x0

  // This example uses a conditional instruction (cneg) to compute the
  // absolute value of an integer.
  __ Cmp(x0, 0);
  __ Cneg(x0, x0, mi);
  __ Ret();
}
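
// For reference, the generated function is equivalent to this C++ (an
// illustrative sketch, not part of the original example):
static int64_t AbsReference(int64_t x) { return (x < 0) ? -x : x; }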


#ifndef TEST_EXAMPLES
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
int main(void) {
  MacroAssembler masm;
  Decoder decoder;
  Simulator simulator(&decoder);

  // Generate the code for the example function.
  Label abs;
  masm.Bind(&abs);
  GenerateAbs(&masm);
  masm.FinalizeCode();

  // Run the example function.
  int64_t input_value = -42;
  simulator.WriteXRegister(0, input_value);
  simulator.RunFrom(masm.GetLabelAddress<Instruction*>(&abs));
  printf("abs(%" PRId64 ") = %" PRId64 "\n",
         input_value,
         simulator.ReadXRegister(0));

  return 0;
}
#else
// Without the simulator there is nothing to test.
int main(void) { return 0; }
#endif // VIXL_INCLUDE_SIMULATOR_AARCH64
#endif // TEST_EXAMPLES

161
externals/vixl/vixl/examples/aarch64/add2-vectors.cc
vendored
Normal file

@@ -0,0 +1,161 @@
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "examples.h"

using namespace vixl;
using namespace vixl::aarch64;

// Macro to compute the number of elements in a vector.
#define ARRAY_SIZE(Array) (sizeof(Array) / sizeof((Array)[0]))
#define __ masm->

/*
 * This example adds two vectors with 1-byte elements using NEON instructions,
 * and returns the results in the first vector.
 */
void GenerateAdd2Vectors(MacroAssembler* masm) {
  // void add2_vectors(uint8_t *vec_a, const uint8_t *vec_b, unsigned size)
  // Argument locations:
  //   vec_a (pointer) -> x0
  //   vec_b (pointer) -> x1
  //   size (integer) -> w2
  // Result returned in vec_a.

  Label loop16, loopr, end;

  // Loop to add vector elements in 16-byte chunks.
  __ Bind(&loop16);

  // Handle vectors smaller than 16 bytes in the remainder loop.
  __ Cmp(w2, 16);
  __ B(lo, &loopr);
  __ Sub(w2, w2, 16);

  // Add vectors in 16-byte chunks.
  __ Ld1(v0.V16B(), MemOperand(x0));
  __ Ld1(v1.V16B(), MemOperand(x1, 16, PostIndex));
  __ Add(v0.V16B(), v0.V16B(), v1.V16B());
  __ St1(v0.V16B(), MemOperand(x0, 16, PostIndex));

  __ B(&loop16);

  // Loop to add the remaining vector elements.
  __ Bind(&loopr);

  // If there are no more vector elements to process, then exit.
  __ Cbz(w2, &end);
  __ Sub(w2, w2, 1);

  // Add remaining vector elements in 1-byte chunks.
  __ Ldrb(w5, MemOperand(x0));
  __ Ldrb(w6, MemOperand(x1, 1, PostIndex));
  __ Add(w5, w5, w6);
  __ Strb(w5, MemOperand(x0, 1, PostIndex));

  __ B(&loopr);

  __ Bind(&end);

  __ Ret();
}
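
// For reference, the generated function computes the same result as this
// plain C++ loop (an illustrative sketch; the main() below uses an
// equivalent loop to check the NEON result):
static void Add2VectorsReference(uint8_t* vec_a,
                                 const uint8_t* vec_b,
                                 unsigned size) {
  for (unsigned i = 0; i < size; i++) vec_a[i] += vec_b[i];
}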


void PrintVector(const uint8_t* vec, unsigned num) {
  unsigned i;
  printf("( ");
  if (num > 0) {
    for (i = 0; i < num - 1; ++i) {
      printf("%d, ", vec[i]);
    }
    printf("%d", vec[i]);
  }
  printf(" )\n");
}


#ifndef TEST_EXAMPLES
int main(void) {
  MacroAssembler masm;

  // Generate native code for the example function.
  Label add2_vectors;
  masm.Bind(&add2_vectors);
  GenerateAdd2Vectors(&masm);
  masm.FinalizeCode();

  // Initialize input data for the example function.
  // clang-format off
  uint8_t vec_a[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
                     13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
                     10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20};
  uint8_t vec_b[] = {16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
                     29, 30, 31, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
                     26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36};
  // clang-format on
  uint8_t vec_c[ARRAY_SIZE(vec_a)];

  // Check whether the number of elements in both vectors match.
  VIXL_CHECK(ARRAY_SIZE(vec_a) == ARRAY_SIZE(vec_b));

  // Compute the result in C.
  for (unsigned i = 0; i < ARRAY_SIZE(vec_a); i++) {
    vec_c[i] = vec_a[i] + vec_b[i];
  }

#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
  uintptr_t vec_a_addr = reinterpret_cast<uintptr_t>(vec_a);
  uintptr_t vec_b_addr = reinterpret_cast<uintptr_t>(vec_b);

  // Configure register environment in the simulator.
  Decoder decoder;
  Simulator simulator(&decoder);
  simulator.WriteXRegister(0, vec_a_addr);
  simulator.WriteXRegister(1, vec_b_addr);
  simulator.WriteXRegister(2, ARRAY_SIZE(vec_a));
  PrintVector(vec_a, ARRAY_SIZE(vec_a));
  printf(" +\n");
  PrintVector(vec_b, ARRAY_SIZE(vec_b));

  // Run the example function in the simulator.
  simulator.RunFrom(masm.GetLabelAddress<Instruction*>(&add2_vectors));
  printf(" =\n");
  PrintVector(vec_a, ARRAY_SIZE(vec_a));

  // Check that the computed value in NEON matches the C version.
  for (unsigned i = 0; i < ARRAY_SIZE(vec_a); i++) {
    VIXL_CHECK(vec_c[i] == vec_a[i]);
  }
#else
  USE(vec_c);

  // Placeholder to run test natively.
  printf("Running tests natively is not supported yet.\n");
#endif // VIXL_INCLUDE_SIMULATOR_AARCH64

  return 0;
}
#endif // TEST_EXAMPLES

77
externals/vixl/vixl/examples/aarch64/add3-double.cc
vendored
Normal file

@@ -0,0 +1,77 @@
// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "examples.h"

using namespace vixl;
using namespace vixl::aarch64;

#define __ masm->

void GenerateAdd3Double(MacroAssembler* masm) {
  // double add3_double(double x, double y, double z)
  // Argument locations:
  //   x -> d0
  //   y -> d1
  //   z -> d2
  __ Fadd(d0, d0, d1); // d0 <- x + y
  __ Fadd(d0, d0, d2); // d0 <- d0 + z

  // The return value is already in d0.
  __ Ret();
}
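
// For reference, the generated function is equivalent to this C++ (an
// illustrative sketch, not part of the original example):
static double Add3DoubleReference(double x, double y, double z) {
  return x + y + z;
}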


#ifndef TEST_EXAMPLES
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
int main(void) {
  MacroAssembler masm;
  Decoder decoder;
  Simulator simulator(&decoder);

  // Generate the code for the example function.
  Label add3_double;
  masm.Bind(&add3_double);
  GenerateAdd3Double(&masm);
  masm.FinalizeCode();

  // Run the example function.
  double a = 498.36547;
  double b = 23.369;
  double c = 7964.697954;
  simulator.WriteDRegister(0, a);
  simulator.WriteDRegister(1, b);
  simulator.WriteDRegister(2, c);
  simulator.RunFrom(masm.GetLabelAddress<Instruction*>(&add3_double));
  printf("%f + %f + %f = %f\n", a, b, c, simulator.ReadDRegister(0));

  return 0;
}
#else
// Without the simulator there is nothing to test.
int main(void) { return 0; }
#endif // VIXL_INCLUDE_SIMULATOR_AARCH64
#endif // TEST_EXAMPLES

90
externals/vixl/vixl/examples/aarch64/add4-double.cc
vendored
Normal file

@@ -0,0 +1,90 @@
// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "examples.h"

using namespace vixl;
using namespace vixl::aarch64;

#define __ masm->

void GenerateAdd4Double(MacroAssembler* masm) {
  // double Add4Double(uint64_t a, double b, uint64_t c, double d)
  // Argument locations:
  //   a -> x0
  //   b -> d0
  //   c -> x1
  //   d -> d1

  // Turn 'a' and 'c' into double values.
  __ Ucvtf(d2, x0);
  __ Ucvtf(d3, x1);

  // Add everything together.
  __ Fadd(d0, d0, d1);
  __ Fadd(d2, d2, d3);
  __ Fadd(d0, d0, d2);

  // The return value is in d0.
  __ Ret();
}
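
// For reference, the generated function is equivalent to this C++ (an
// illustrative sketch; Ucvtf performs the unsigned-integer-to-double
// conversions):
static double Add4DoubleReference(uint64_t a, double b, uint64_t c, double d) {
  return static_cast<double>(a) + b + static_cast<double>(c) + d;
}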


#ifndef TEST_EXAMPLES
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
int main(void) {
  MacroAssembler masm;
  Decoder decoder;
  Simulator simulator(&decoder);

  // Generate the code for the example function.
  Label add4_double;
  masm.Bind(&add4_double);
  GenerateAdd4Double(&masm);
  masm.FinalizeCode();

  // Run the example function.
  uint64_t a = 21;
  double b = 987.3654;
  uint64_t c = 4387;
  double d = 36.698754;
  simulator.WriteXRegister(0, a);
  simulator.WriteDRegister(0, b);
  simulator.WriteXRegister(1, c);
  simulator.WriteDRegister(1, d);
  simulator.RunFrom(masm.GetLabelAddress<Instruction*>(&add4_double));
  // clang-format off
  printf("%" PRIu64 " + %f + %" PRIu64 " + %f = %f\n",
         a, b, c, d, simulator.ReadDRegister(0));
  // clang-format on

  return 0;
}
#else
// Without the simulator there is nothing to test.
int main(void) { return 0; }
#endif // VIXL_INCLUDE_SIMULATOR_AARCH64
#endif // TEST_EXAMPLES

106
externals/vixl/vixl/examples/aarch64/check-bounds.cc
vendored
Normal file

@@ -0,0 +1,106 @@
// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "examples.h"

using namespace vixl;
using namespace vixl::aarch64;

#define __ masm->

void GenerateCheckBounds(MacroAssembler* masm) {
  // uint64_t check_bounds(uint64_t value, uint64_t low, uint64_t high)
  // Argument locations:
  //   value -> x0
  //   low -> x1
  //   high -> x2

  // First we compare 'value' with the 'low' bound. If x1 <= x0 the N flag will
  // be cleared. This configuration can be checked with the 'pl' condition.
  __ Cmp(x0, x1);

  // Now we will compare 'value' and 'high' (x0 and x2) but only if the 'pl'
  // condition is verified. If the condition is not verified, we will clear
  // all the flags except the carry one (C flag).
  __ Ccmp(x0, x2, CFlag, pl);

  // We set x0 to 1 only if the 'ls' condition is satisfied.
  // 'ls' performs the following test: !(C==1 && Z==0). If the previous
  // comparison has been skipped we have C==1 and Z==0, so the 'ls' test
  // will fail and x0 will be set to 0.
  // Otherwise if the previous comparison occurred, x0 will be set to 1
  // only if x0 is less than or equal to x2.
  __ Cset(x0, ls);

  __ Ret();
}
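
// For reference, the conditional-compare sequence above implements this
// plain C++ predicate (an illustrative sketch, not part of the original
// example):
static uint64_t CheckBoundsReference(uint64_t value,
                                     uint64_t low,
                                     uint64_t high) {
  return ((low <= value) && (value <= high)) ? 1 : 0;
}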


#ifndef TEST_EXAMPLES
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
void run_function(Simulator* simulator,
                  Instruction* function,
                  uint64_t value,
                  uint64_t low,
                  uint64_t high) {
  simulator->WriteXRegister(0, value);
  simulator->WriteXRegister(1, low);
  simulator->WriteXRegister(2, high);

  simulator->RunFrom(function);
  printf("%" PRIu64 " %s between %" PRIu64 " and %" PRIu64 "\n",
         value,
         simulator->ReadXRegister(0) ? "is" : "is not",
         low,
         high);

  simulator->ResetState();
}

int main(void) {
  MacroAssembler masm;
  Decoder decoder;
  Simulator simulator(&decoder);

  // Generate the code for the example function.
  Label check_bounds;
  masm.Bind(&check_bounds);
  GenerateCheckBounds(&masm);
  masm.FinalizeCode();

  // Run the example function.
  Instruction* function = masm.GetLabelAddress<Instruction*>(&check_bounds);
  run_function(&simulator, function, 546, 50, 1000);
  run_function(&simulator, function, 62, 100, 200);
  run_function(&simulator, function, 200, 100, 200);

  return 0;
}
#else
// Without the simulator there is nothing to test.
int main(void) { return 0; }
#endif // VIXL_INCLUDE_SIMULATOR_AARCH64
#endif // TEST_EXAMPLES

53
externals/vixl/vixl/examples/aarch64/cpu-features.cc
vendored
Normal file

@@ -0,0 +1,53 @@
// Copyright 2020, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "examples.h"

using namespace vixl;
using namespace vixl::aarch64;

// Demonstrate the use of VIXL's CPU feature detection, printing the features
// that VIXL detects.
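
// A typical way to consume the detected feature set (a hedged sketch; it
// assumes the CPUFeatures::Has query and the kAES feature symbol, which are
// not exercised elsewhere in this example):
//   CPUFeatures features = CPUFeatures::InferFromOS();
//   if (features.Has(CPUFeatures::kAES)) {
//     // Safe to generate AES instructions.
//   }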

#ifndef TEST_EXAMPLES
int main() {
  // Simple native deployments should initialise CPU features using
  // `InferFromOS()`. If not on an AArch64 host, this returns nothing.
  std::cout << "==== CPUFeatures::InferFromOS() ====\n";
  std::cout << CPUFeatures::InferFromOS() << "\n";

  // VIXL assumes support for FP, NEON and CRC32 by default. These features
  // were implemented before the CPUFeatures mechanism.
  std::cout << "==== CPUFeatures::AArch64LegacyBaseline() ====\n";
  std::cout << CPUFeatures::AArch64LegacyBaseline() << "\n";

  // Retrieve a list of all supported CPU features.
  std::cout << "==== CPUFeatures::All() ====\n";
  std::cout << CPUFeatures::All() << "\n";

  return 0;
}
#endif

106
externals/vixl/vixl/examples/aarch64/crc-checksums.cc
vendored
Normal file

@@ -0,0 +1,106 @@
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "examples.h"

using namespace vixl;
using namespace vixl::aarch64;

#define __ masm->

void GenerateCrc32(MacroAssembler* masm) {
  // uint32_t crc32(const char *msg, size_t length)
  // Argument location:
  //   msg (pointer) -> x0
  //   length (integer) -> x1

  // This example computes a CRC32B checksum over an input array of a given
  // size, one byte at a time, and returns the resulting checksum in w0.

  Label loop, end;

  // Move input array to temp register so we can re-use w0 as return register.
  __ Mov(x2, x0);

  // Initial remainder for the checksum. If length=0, then this value will be
  // returned.
  __ Mov(w0, 0xffffffff);

  // Loop for iterating through the array, starting at msg[0].
  __ Bind(&loop);

  // If no more elements to process, then exit function.
  __ Cbz(x1, &end);
  __ Sub(x1, x1, 1);

  // Compute checksum for msg[i].
  __ Ldrb(w3, MemOperand(x2, 1, PostIndex));
  __ Crc32b(w0, w0, w3);
  __ B(&loop);

  __ Bind(&end);

  __ Ret();
}
|
||||
|
||||
#ifndef TEST_EXAMPLES
|
||||
|
||||
void RunExample(const char* msg) {
|
||||
MacroAssembler masm;
|
||||
|
||||
// Generate the code for the example function.
|
||||
Label func;
|
||||
masm.Bind(&func);
|
||||
GenerateCrc32(&masm);
|
||||
masm.FinalizeCode();
|
||||
|
||||
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
|
||||
// Run example function in the simulator.
|
||||
uintptr_t msg_addr = reinterpret_cast<uintptr_t>(msg);
|
||||
size_t msg_size = strlen(msg);
|
||||
Decoder decoder;
|
||||
Simulator simulator(&decoder);
|
||||
simulator.WriteXRegister(0, msg_addr);
|
||||
simulator.WriteXRegister(1, msg_size);
|
||||
simulator.RunFrom(masm.GetLabelAddress<Instruction*>(&func));
|
||||
printf("crc32(\"%s\")=0x%x\n", msg, simulator.ReadWRegister(0));
|
||||
#else
|
||||
// Run example function natively.
|
||||
printf("Not yet implemented.\n");
|
||||
USE(msg);
|
||||
#endif // VIXL_INCLUDE_SIMULATOR_AARCH64
|
||||
}
|
||||
|
||||
|
||||
int main(void) {
|
||||
RunExample("Hello World!");
|
||||
RunExample("do");
|
||||
RunExample("1");
|
||||
RunExample("");
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif // TEST_EXAMPLES
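For cross-checking the simulator output, a software model of the checksum is handy. This is only a reference sketch, not part of the example: it assumes the ARM CRC32B semantics (reflected CRC-32 polynomial 0xEDB88320) and, like the generated code, starts from 0xffffffff and skips the conventional final inversion, so it should match the printed w0 value.

  #include <stddef.h>
  #include <stdint.h>

  // Bitwise reference for the checksum computed by GenerateCrc32.
  uint32_t ReferenceCrc32(const char* msg, size_t length) {
    uint32_t crc = 0xffffffff;
    for (size_t i = 0; i < length; i++) {
      crc ^= static_cast<uint8_t>(msg[i]);
      for (int bit = 0; bit < 8; bit++) {
        // Shift one bit out; apply the reflected polynomial on carry.
        crc = (crc >> 1) ^ (0xedb88320u & (0u - (crc & 1u)));
      }
    }
    return crc;
  }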
193
externals/vixl/vixl/examples/aarch64/custom-disassembler.cc
vendored
Normal file
@@ -0,0 +1,193 @@
// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "custom-disassembler.h"

#include "examples.h"

using namespace vixl;
using namespace vixl::aarch64;

#define __ masm->


// We override this method to specify how register names should be
// disassembled.
void CustomDisassembler::AppendRegisterNameToOutput(const Instruction* instr,
                                                    const CPURegister& reg) {
  USE(instr);
  if (reg.IsRegister()) {
    switch (reg.GetCode()) {
      case 16:
        AppendToOutput(reg.Is64Bits() ? "ip0" : "wip0");
        return;
      case 17:
        AppendToOutput(reg.Is64Bits() ? "ip1" : "wip1");
        return;
      case 30:
        AppendToOutput(reg.Is64Bits() ? "lr" : "w30");
        return;
      case kSPRegInternalCode:
        AppendToOutput(reg.Is64Bits() ? "x_stack_pointer" : "w_stack_pointer");
        return;
      case 31:
        AppendToOutput(reg.Is64Bits() ? "x_zero_reg" : "w_zero_reg");
        return;
      default:
        // Fall through.
        break;
    }
  }
  // Print other register names as usual.
  Disassembler::AppendRegisterNameToOutput(instr, reg);
}


static const char* FakeLookupTargetDescription(const void* address) {
  USE(address);
  // We fake looking up the address.
  static int i = 0;
  const char* desc = NULL;
  if (i == 0) {
    desc = "label: somewhere";
  } else if (i == 2) {
    desc = "label: somewhere else";
  }
  i++;
  return desc;
}


// We override this method to add a description to addresses that we know
// about. In this example we fake looking up a description, but in practice one
// could, for example, use a table mapping addresses to function names.
void CustomDisassembler::AppendCodeRelativeCodeAddressToOutput(
    const Instruction* instr, const void* addr) {
  USE(instr);
  // Print the address.
  int64_t rel_addr = CodeRelativeAddress(addr);
  if (rel_addr >= 0) {
    AppendToOutput("(addr 0x%" PRIx64, rel_addr);
  } else {
    AppendToOutput("(addr -0x%" PRIx64, -rel_addr);
  }

  // If available, print a description of the address.
  const char* address_desc = FakeLookupTargetDescription(addr);
  if (address_desc != NULL) {
    Disassembler::AppendToOutput(" ; %s", address_desc);
  }
  AppendToOutput(")");
}


// We override this method to add a comment to this type of instruction. Helpers
// from the vixl::Instruction class can be used to analyse the instruction being
// disassembled.
void CustomDisassembler::VisitAddSubShifted(const Instruction* instr) {
  vixl::aarch64::Disassembler::VisitAddSubShifted(instr);
  if (instr->GetRd() == 10) {
    AppendToOutput(" // add/sub to x10");
  }
  ProcessOutput(instr);
}


void GenerateCustomDisassemblerTestCode(MacroAssembler* masm) {
  // Generate some code to illustrate how the modified disassembler changes the
  // disassembly output.
  Label begin, end;
  __ Bind(&begin);
  __ Add(x10, x16, x17);
  __ Cbz(x10, &end);
  __ Add(x11, ip0, ip1);
  __ Add(w5, w6, w30);
  __ Tbz(x10, 2, &begin);
  __ Tbnz(x10, 3, &begin);
  __ Br(x30);
  __ Br(lr);
  __ Fadd(d30, d16, d17);
  __ Push(xzr, xzr);
  __ Pop(x16, x20);
  __ Bind(&end);
}


void TestCustomDisassembler() {
  MacroAssembler masm;

  // Generate the code.
  Label code_start, code_end;
  masm.Bind(&code_start);
  GenerateCustomDisassemblerTestCode(&masm);
  masm.Bind(&code_end);
  masm.FinalizeCode();
  Instruction* instr_start = masm.GetLabelAddress<Instruction*>(&code_start);
  Instruction* instr_end = masm.GetLabelAddress<Instruction*>(&code_end);

  // Instantiate a standard disassembler, our custom disassembler, and register
  // them with a decoder.
  Decoder decoder;
  Disassembler disasm;
  CustomDisassembler custom_disasm;
  decoder.AppendVisitor(&disasm);
  decoder.AppendVisitor(&custom_disasm);

  // In our custom disassembler, disassemble as if the base address were -0x8.
  // Note that this can also be achieved with
  //   custom_disasm.MapCodeAddress(0x0, instr_start + 2 * kInstructionSize);
  // Users may generally want to map the start address to 0x0. Mapping to a
  // negative offset can be used to focus on the section of the
  // disassembly at address 0x0.
  custom_disasm.MapCodeAddress(-0x8, instr_start);

  // Iterate through the instructions to show the difference in the
  // disassembly.
  Instruction* instr;
  for (instr = instr_start; instr < instr_end; instr += kInstructionSize) {
    decoder.Decode(instr);
    printf("\n");
    printf("VIXL disasm\t %p:\t%s\n",
           reinterpret_cast<void*>(instr),
           disasm.GetOutput());
    int64_t rel_addr =
        custom_disasm.CodeRelativeAddress(reinterpret_cast<void*>(instr));
    char rel_addr_sign_char = ' ';
    if (rel_addr < 0) {
      rel_addr_sign_char = '-';
      rel_addr = -rel_addr;
    }
    printf("custom disasm\t%c0x%" PRIx64 ":\t%s\n",
           rel_addr_sign_char,
           rel_addr,
           custom_disasm.GetOutput());
  }
}


#ifndef TEST_EXAMPLES
int main() {
  TestCustomDisassembler();
  return 0;
}
#endif
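The address mapping deserves a worked example: with MapCodeAddress(-0x8, instr_start), the first generated instruction prints at address -0x8, the second at -0x4, and the third, instr_start + 2 * kInstructionSize, lands at 0x0 — exactly what the alternative call shown in the comment would express.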
56
externals/vixl/vixl/examples/aarch64/custom-disassembler.h
vendored
Normal file
@@ -0,0 +1,56 @@
// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_EXAMPLES_CUSTOM_DISASSEMBLER_H_
#define VIXL_EXAMPLES_CUSTOM_DISASSEMBLER_H_

#include "aarch64/disasm-aarch64.h"

void TestCustomDisassembler();

// We want to change three things in the disassembly:
// - Add comments to some add/sub instructions.
// - Use aliases for register names.
// - Add descriptions for code addresses.
class CustomDisassembler : public vixl::aarch64::Disassembler {
 public:
  CustomDisassembler() : vixl::aarch64::Disassembler() {}
  virtual ~CustomDisassembler() {}

  virtual void VisitAddSubShifted(const vixl::aarch64::Instruction* instr)
      VIXL_OVERRIDE;

 protected:
  virtual void AppendRegisterNameToOutput(
      const vixl::aarch64::Instruction* instr,
      const vixl::aarch64::CPURegister& reg) VIXL_OVERRIDE;

  virtual void AppendCodeRelativeCodeAddressToOutput(
      const vixl::aarch64::Instruction* instr, const void* addr) VIXL_OVERRIDE;
};


#endif
134
externals/vixl/vixl/examples/aarch64/disasm.cc
vendored
Normal file
@@ -0,0 +1,134 @@
// Copyright 2020, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "code-buffer-vixl.h"
#include "aarch64/decoder-aarch64.h"
#include "aarch64/disasm-aarch64.h"

// This example is interactive, and isn't tested systematically.
#ifndef TEST_EXAMPLES

using namespace vixl;
using namespace vixl::aarch64;

void PrintUsage(char const* name) {
  printf("Usage: %s [OPTION]... <INSTRUCTION>...\n", name);
  printf("\n");
  printf("Disassemble ad-hoc A64 instructions.\n");
  printf("\n");
  printf(
      "Options:\n"
      "  --start-at <address>\n"
      "      Start disassembling from <address>. Any signed 64-bit value\n"
      "      accepted by strtoll can be specified. The address is printed\n"
      "      alongside each instruction, and it is also used to decode\n"
      "      PC-relative offsets.\n"
      "\n"
      "      Defaults to 0.\n"
      "\n");
  printf(
      "<instruction>\n"
      "    A hexadecimal representation of an A64 instruction. The leading '0x'\n"
      "    (or '0X') is optional.\n"
      "\n"
      "    Multiple instructions can be provided; they will be disassembled as\n"
      "    if they were read sequentially from memory.\n"
      "\n");
  printf("Examples:\n");
  printf("  $ %s d2824685\n", name);
  printf("   0x0000000000000000:  d2824685  movz x5, #0x1234\n");
  printf("\n");
  printf("  $ %s --start-at -4 0x10fffe85 0xd61f00a0\n", name);
  printf("  -0x0000000000000004:  10fffe85  adr x5, #-0x30 (addr -0x34)\n");
  printf("   0x0000000000000000:  d61f00a0  br x5\n");
}

Instr ParseInstr(char const* arg) {
  // TODO: Error handling for out-of-range inputs.
  return (Instr)strtoul(arg, NULL, 16);
}

int64_t ParseInt64(char const* arg) {
  // TODO: Error handling for out-of-range inputs.
  return (int64_t)strtoll(arg, NULL, 0);
}

int main(int argc, char* argv[]) {
  for (int i = 1; i < argc; i++) {
    char const* arg = argv[i];
    if ((strcmp(arg, "--help") == 0) || (strcmp(arg, "-h") == 0)) {
      PrintUsage(argv[0]);
      return 0;
    }
  }

  // Assume an address of 0, unless otherwise specified.
  int64_t start_address = 0;
  // Allocate space for one instruction per argument.
  CodeBuffer buffer((argc - 1) * kInstructionSize);

  bool expect_start_at = false;
  for (int i = 1; i < argc; i++) {
    char* arg = argv[i];
    if (expect_start_at) {
      start_address = ParseInt64(arg);
      expect_start_at = false;
    } else if (strcmp(arg, "--start-at") == 0) {
      expect_start_at = true;
    } else {
      // Assume that everything else is an instruction.
      buffer.Emit(ParseInstr(arg));
    }
  }
  buffer.SetClean();

  if (expect_start_at) {
    printf("No address given. Use: --start-at <address>\n");
    return 1;
  }

  if (buffer.GetSizeInBytes() == 0) {
    printf("Nothing to disassemble.\n");
    return 0;
  }

  // Disassemble the buffer.
  const Instruction* start = buffer.GetStartAddress<Instruction*>();
  const Instruction* end = buffer.GetEndAddress<Instruction*>();
  vixl::aarch64::PrintDisassembler disasm(stdout);
  disasm.PrintSignedAddresses(true);
  disasm.MapCodeAddress(start_address, start);
  disasm.DisassembleBuffer(start, end);

  return 0;
}

#endif // TEST_EXAMPLES
128
externals/vixl/vixl/examples/aarch64/examples.h
vendored
Normal file
@@ -0,0 +1,128 @@
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_EXAMPLE_EXAMPLES_H_
#define VIXL_EXAMPLE_EXAMPLES_H_

#include "aarch64/macro-assembler-aarch64.h"
#include "aarch64/simulator-aarch64.h"

// Generate a function with the following prototype:
//   uint64_t factorial(uint64_t n)
//
// It provides an iterative implementation of the factorial computation.
void GenerateFactorial(vixl::aarch64::MacroAssembler* masm);

// Generate a function with the following prototype:
//   uint64_t factorial_rec(uint64_t n)
//
// It provides a recursive implementation of the factorial computation.
void GenerateFactorialRec(vixl::aarch64::MacroAssembler* masm);

// Generate a function with the following prototype:
//   void neon_matrix_multiply(float* dst, float* mat1, float* mat2)
//
// It provides an implementation of a column-major 4x4 matrix multiplication.
void GenerateNEONMatrixMultiply(vixl::aarch64::MacroAssembler* masm);

// Generate a function with the following prototype:
//   void add2_vectors(int8_t *vec_a, const int8_t *vec_b, unsigned size)
//
// Demonstrate how to add two vectors using NEON. The result is stored in vec_a.
void GenerateAdd2Vectors(vixl::aarch64::MacroAssembler* masm);

// Generate a function with the following prototype:
//   double add3_double(double x, double y, double z)
//
// This example is intended to show the calling convention with double
// floating-point arguments.
void GenerateAdd3Double(vixl::aarch64::MacroAssembler* masm);

// Generate a function with the following prototype:
//   double add4_double(uint64_t a, double b, uint64_t c, double d)
//
// The generated function illustrates the calling convention for functions
// mixing integer and floating-point arguments.
void GenerateAdd4Double(vixl::aarch64::MacroAssembler* masm);

// Generate a function with the following prototype:
//   uint32_t sum_array(uint8_t* array, uint32_t size)
//
// The generated function computes the sum of all the elements in
// the given array.
void GenerateSumArray(vixl::aarch64::MacroAssembler* masm);

// Generate a function with the following prototype:
//   int64_t abs(int64_t x)
//
// The generated function computes the absolute value of an integer.
void GenerateAbs(vixl::aarch64::MacroAssembler* masm);

// Generate a function with the following prototype:
//   uint64_t check_bounds(uint64_t value, uint64_t low, uint64_t high)
//
// The goal of this example is to illustrate the use of conditional
// instructions. The generated function checks that the given value is
// contained within the given boundaries. It returns 1 if 'value' is between
// 'low' and 'high' (i.e. low <= value <= high).
void GenerateCheckBounds(vixl::aarch64::MacroAssembler* masm);

// Generate a function with the following prototype:
//   uint32_t crc32(const char *msg, size_t msg_length)
//
// The generated function computes the CRC-32 checksum of the input msg
// with the specified length, and returns the result.
void GenerateCrc32(vixl::aarch64::MacroAssembler* masm);

// Generate a function which uses the stack to swap the contents of the x0, x1,
// x2 and x3 registers.
void GenerateSwap4(vixl::aarch64::MacroAssembler* masm);

// Generate a function which swaps the contents of w0 and w1.
// This example demonstrates some interesting features of VIXL's stack
// operations.
void GenerateSwapInt32(vixl::aarch64::MacroAssembler* masm);

// Generate a function with the following prototype:
//   uint64_t demo_function(uint64_t x)
//
// This is the example used in doc/getting-started-aarch64.txt.
void GenerateDemoFunction(vixl::aarch64::MacroAssembler* masm);

// This function generates and runs code that uses literals to sum the `a` and
// `b` inputs.
int64_t LiteralExample(int64_t a, int64_t b);

// Generate a few examples of runtime calls.
void GenerateRuntimeCallExamples(vixl::aarch64::MacroAssembler* masm);

// Generate a function with the following prototype:
//   size_t sve_strlen(const char* str);
//
// The function implements the standard `strlen` using SVE.
void GenerateSVEStrlen(vixl::aarch64::MacroAssembler* masm);

#endif // VIXL_EXAMPLE_EXAMPLES_H_
86
externals/vixl/vixl/examples/aarch64/factorial-rec.cc
vendored
Normal file
@@ -0,0 +1,86 @@
// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "examples.h"

using namespace vixl;
using namespace vixl::aarch64;

#define __ masm->

void GenerateFactorialRec(MacroAssembler* masm) {
  // uint64_t factorial_rec(uint64_t n)
  // Argument location:
  //   n -> x0

  Label entry, input_is_zero;

  __ Bind(&entry);
  // Check for the stopping condition: the input number is zero.
  __ Cbz(x0, &input_is_zero);

  __ Mov(x1, x0);
  __ Sub(x0, x0, 1);
  __ Push(x1, lr);
  __ Bl(&entry);  // Recursive call: factorial_rec(n - 1).
  __ Pop(lr, x1);
  __ Mul(x0, x0, x1);
  __ Ret();

  __ Bind(&input_is_zero);
  __ Mov(x0, 1);
  __ Ret();
}


#ifndef TEST_EXAMPLES
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
int main(void) {
  MacroAssembler masm;
  Decoder decoder;
  Simulator simulator(&decoder);

  // Generate the code for the example function.
  Label factorial_rec;
  masm.Bind(&factorial_rec);
  GenerateFactorialRec(&masm);
  masm.FinalizeCode();

  // Run the example function.
  uint64_t input_val = 16;
  simulator.WriteXRegister(0, input_val);
  simulator.RunFrom(masm.GetLabelAddress<Instruction*>(&factorial_rec));
  printf("factorial(%" PRIu64 ") = %" PRId64 "\n",
         input_val,
         simulator.ReadXRegister(0));

  return 0;
}
#else
// Without the simulator there is nothing to test.
int main(void) { return 0; }
#endif // VIXL_INCLUDE_SIMULATOR_AARCH64
#endif // TEST_EXAMPLES
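The generated assembly mirrors this plain C++ recursion (a reference sketch, not part of the example); for the input above, factorial_rec(16) = 20922789888000, which fits comfortably in a uint64_t:

  #include <stdint.h>

  uint64_t factorial_rec(uint64_t n) {
    // The generated code saves `n` in x1 and lr on the stack across the call.
    return (n == 0) ? 1 : n * factorial_rec(n - 1);
  }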
84
externals/vixl/vixl/examples/aarch64/factorial.cc
vendored
Normal file
@@ -0,0 +1,84 @@
// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "examples.h"

using namespace vixl;
using namespace vixl::aarch64;

#define __ masm->

void GenerateFactorial(MacroAssembler* masm) {
  // uint64_t factorial(uint64_t n)
  // Argument location:
  //   n -> x0

  Label loop, end;

  __ Mov(x1, x0);
  __ Mov(x0, 1);  // Use x0 as the accumulator.

  __ Cbz(x1, &end);  // Nothing to do if the input is zero.

  __ Bind(&loop);
  __ Mul(x0, x0, x1);
  __ Sub(x1, x1, 1);
  __ Cbnz(x1, &loop);

  __ Bind(&end);
  // The return value is in x0.
  __ Ret();
}


#ifndef TEST_EXAMPLES
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
int main(void) {
  MacroAssembler masm;
  Decoder decoder;
  Simulator simulator(&decoder);

  // Generate the code for the example function.
  Label factorial;
  masm.Bind(&factorial);
  GenerateFactorial(&masm);
  masm.FinalizeCode();

  // Run the example function.
  uint64_t input_val = 16;
  simulator.WriteXRegister(0, input_val);
  simulator.RunFrom(masm.GetLabelAddress<Instruction*>(&factorial));
  printf("factorial(%" PRIu64 ") = %" PRId64 "\n",
         input_val,
         simulator.ReadXRegister(0));

  return 0;
}
#else
// Without the simulator there is nothing to test.
int main(void) { return 0; }
#endif // VIXL_INCLUDE_SIMULATOR_AARCH64
#endif // TEST_EXAMPLES
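For comparison, the loop above corresponds to this iterative C++ sketch (again only a reference, not part of the example); x1 plays the role of `n` and x0 the accumulator:

  #include <stdint.h>

  uint64_t factorial(uint64_t n) {
    uint64_t result = 1;
    while (n != 0) {
      result *= n;
      n--;
    }
    return result;
  }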
67
externals/vixl/vixl/examples/aarch64/getting-started.cc
vendored
Normal file
@@ -0,0 +1,67 @@
// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "aarch64/macro-assembler-aarch64.h"
#include "aarch64/simulator-aarch64.h"

using namespace vixl;
using namespace vixl::aarch64;

#define __ masm->

void GenerateDemoFunction(MacroAssembler *masm) {
  // uint64_t demo_function(uint64_t x)
  __ Ldr(x1, 0x1122334455667788);
  __ And(x0, x0, x1);
  __ Ret();
}


#ifndef TEST_EXAMPLES
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
int main() {
  MacroAssembler masm;
  Decoder decoder;
  Simulator simulator(&decoder);

  Label demo_function;
  masm.Bind(&demo_function);
  GenerateDemoFunction(&masm);
  masm.FinalizeCode();

  simulator.WriteXRegister(0, 0x8899aabbccddeeff);
  simulator.RunFrom(masm.GetLabelAddress<Instruction *>(&demo_function));
  printf("x0 = %" PRIx64 "\n", simulator.ReadXRegister(0));

  return 0;
}
#else
// Without the simulator there is nothing to test.
int main(void) { return 0; }
#endif // VIXL_INCLUDE_SIMULATOR_AARCH64
#endif // TEST_EXAMPLES
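The expected output is easy to derive by hand: 0x8899aabbccddeeff & 0x1122334455667788 = 0x0000220044446688, so the program should print x0 = 220044446688.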
101
externals/vixl/vixl/examples/aarch64/literal.cc
vendored
Normal file
@@ -0,0 +1,101 @@
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "examples.h"

using namespace vixl;
using namespace vixl::aarch64;

#define __ masm.

#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
int64_t LiteralExample(int64_t a, int64_t b) {
  // Create and initialize the macro-assembler and the simulator.
  MacroAssembler masm;
  Decoder decoder;
  Simulator simulator(&decoder);

  Literal<int64_t> automatically_placed_literal(111, masm.GetLiteralPool());
  Literal<int64_t> manually_placed_literal(222);

  // Generate some code.
  Label start;
  masm.Bind(&start);
  {
    ExactAssemblyScope scope(&masm,
                             kInstructionSize + sizeof(int64_t),
                             ExactAssemblyScope::kExactSize);
    Label over_literal;
    __ b(&over_literal);
    __ place(&manually_placed_literal);
    __ bind(&over_literal);
  }
  __ Ldr(x1, &manually_placed_literal);
  __ Ldr(x2, &automatically_placed_literal);
  __ Add(x0, x1, x2);
  __ Ret();

  masm.FinalizeCode();

  // Usually, compilers will move the code to another place in memory before
  // executing it. Emulate that.
  size_t code_size = masm.GetSizeOfCodeGenerated();
  uint8_t* code = reinterpret_cast<uint8_t*>(malloc(code_size));
  if (code == NULL) {
    return 1;
  }
  memcpy(code, masm.GetBuffer()->GetStartAddress<void*>(), code_size);

  // Run the code.
  simulator.RunFrom(masm.GetLabelAddress<Instruction*>(&start));
  printf("111 + 222 = %" PRId64 "\n", simulator.ReadXRegister(0));

  // Now let's modify the values of the literals.
  automatically_placed_literal.UpdateValue(a, code);
  manually_placed_literal.UpdateValue(b, code);

  // Run the code again.
  simulator.RunFrom(reinterpret_cast<Instruction*>(code));
  printf("%" PRId64 " + %" PRId64 " = %" PRId64 "\n",
         a,
         b,
         simulator.ReadXRegister(0));

  return simulator.ReadXRegister(0);
}
#endif

#ifndef TEST_EXAMPLES
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
int main(void) {
  VIXL_CHECK(LiteralExample(1, 2) == 3);
  return 0;
}
#else
// Without the simulator there is nothing to test.
int main(void) { return 0; }
#endif // VIXL_INCLUDE_SIMULATOR_AARCH64
#endif // TEST_EXAMPLES
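Two details of this example are worth spelling out. The ExactAssemblyScope size, kInstructionSize + sizeof(int64_t), is 4 + 8 = 12 bytes: exactly one branch instruction jumping over the manually placed 8-byte literal. And the two runs differ as intended: the first prints 111 + 222 = 333 from the original pool values, while after UpdateValue patches the copied buffer, LiteralExample(1, 2) prints 1 + 2 = 3 and returns 3, which is what main checks.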
204
externals/vixl/vixl/examples/aarch64/neon-matrix-multiply.cc
vendored
Normal file
@@ -0,0 +1,204 @@
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "examples.h"

using namespace vixl;
using namespace vixl::aarch64;

#define __ masm->

// A vector-by-scalar multiply helper routine to generate code for
// the multiplication of each column of the resulting 4x4 matrix.
// This function provides a template for the following pattern:
//
//   __ Fmul(v<v_out>.V4S(), v4.V4S(), v<s_column>.S(), 0);
//   __ Fmla(v<v_out>.V4S(), v5.V4S(), v<s_column>.S(), 1);
//   __ Fmla(v<v_out>.V4S(), v6.V4S(), v<s_column>.S(), 2);
//   __ Fmla(v<v_out>.V4S(), v7.V4S(), v<s_column>.S(), 3);
//
// v<v_out> corresponds to a column of the output matrix (v0, v1, v2 or v3).
// v<s_column> corresponds to a column of the 2nd input (v16, v17, v18 or v19).
//
static void GenerateMultiplyColumn(MacroAssembler* masm,
                                   unsigned out_column,
                                   unsigned in_column) {
  // 'v_out' splits a Q register into 4 lanes of 32 bits each.
  VRegister v_out = VRegister(out_column, kQRegSize, 4);
  // 'v_in' refers to a single 32-bit 'S' lane.
  VRegister v_in = VRegister(in_column, kSRegSize);

  __ Fmul(v_out, v4.V4S(), v_in, 0);  // e.g. (v0.V4S(), v4.V4S(), v8.S(), 0).
  __ Fmla(v_out, v5.V4S(), v_in, 1);
  __ Fmla(v_out, v6.V4S(), v_in, 2);
  __ Fmla(v_out, v7.V4S(), v_in, 3);
}

void GenerateNEONMatrixMultiply(MacroAssembler* masm) {
  // Argument location:
  //   dst  -> x0
  //   mat1 -> x1
  //   mat2 -> x2

  Label end;

  __ And(x3, x0, x1);
  __ And(x3, x3, x2);
  __ Cbz(x3, &end);  // Nothing to do if an input is null.

  // Load the first matrix into v4, v5, v6 and v7.
  __ Ld1(v4.V4S(), v5.V4S(), v6.V4S(), v7.V4S(), MemOperand(x1));
  // Load the second matrix into v16, v17, v18 and v19.
  __ Ld1(v16.V4S(), v17.V4S(), v18.V4S(), v19.V4S(), MemOperand(x2));

  // Initialise the vectors of the output matrix with zeroes.
  // This is only for the purposes of showing how this can be achieved,
  // but technically it is not required because we overwrite all lanes
  // of the output vectors.
  __ Movi(v0.V16B(), 0);
  __ Movi(v1.V16B(), 0);
  __ Movi(v2.V16B(), 0);
  __ Movi(v3.V16B(), 0);

  GenerateMultiplyColumn(masm, 0, 16);
  GenerateMultiplyColumn(masm, 1, 17);
  GenerateMultiplyColumn(masm, 2, 18);
  GenerateMultiplyColumn(masm, 3, 19);

  // Store the resulting matrix from v0, v1, v2 and v3.
  __ St1(v0.V4S(), v1.V4S(), v2.V4S(), v3.V4S(), MemOperand(x0));

  __ Bind(&end);
  __ Ret();
}


#ifndef TEST_EXAMPLES
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
int main(void) {
  MacroAssembler masm;
  Decoder decoder;
  Simulator simulator(&decoder);

  // Generate the code for the example function.
  Label neon_matrix_multiply;
  masm.Bind(&neon_matrix_multiply);
  GenerateNEONMatrixMultiply(&masm);
  masm.FinalizeCode();

  // Define the required variables and run the example function.
  const int kRowSize = 4;
  const int kColSize = 4;
  const int kLength = kRowSize * kColSize;

  float mat1[kLength], mat2[kLength], output[kLength];

  // Initialise the output matrix to the zero matrix.
  memset(output, 0, sizeof(output[0]) * kLength);

  // Fill the two input matrices with some 32-bit floating-point values.
  // Array initialisation using curly brackets is also possible, like so:
  //   float mat1[kLength] = { 1.0f, 52.03f, 4.43f, ... };
  // However, the following way better shows the "column-major" arrangement.

  mat1[0] = 1.0f;
  mat1[4] = 2.0f;
  mat1[8] = 3.0f;
  mat1[12] = 4.0f;
  mat1[1] = 52.03f;
  mat1[5] = 12.24f;
  mat1[9] = 53.56f;
  mat1[13] = 22.22f;
  mat1[2] = 4.43f;
  mat1[6] = 5.00f;
  mat1[10] = 7.00f;
  mat1[14] = 3.11f;
  mat1[3] = 43.47f;
  mat1[7] = 10.97f;
  mat1[11] = 37.78f;
  mat1[15] = 90.91f;

  mat2[0] = 1.0f;
  mat2[4] = 11.24f;
  mat2[8] = 21.00f;
  mat2[12] = 21.31f;
  mat2[1] = 2.0f;
  mat2[5] = 2.24f;
  mat2[9] = 8.56f;
  mat2[13] = 52.03f;
  mat2[2] = 3.0f;
  mat2[6] = 51.00f;
  mat2[10] = 21.00f;
  mat2[14] = 33.11f;
  mat2[3] = 4.0f;
  mat2[7] = 0.00f;
  mat2[11] = 84.00f;
  mat2[15] = 1.97f;

  simulator.ResetState();
  simulator.WriteXRegister(0, reinterpret_cast<uintptr_t>(output));
  simulator.WriteXRegister(1, reinterpret_cast<uintptr_t>(mat1));
  simulator.WriteXRegister(2, reinterpret_cast<uintptr_t>(mat2));
  simulator.RunFrom(masm.GetLabelAddress<Instruction*>(&neon_matrix_multiply));

  // Print the 4x4 output matrix along with both 4x4 input matrices.
  for (int i = 0; i < kRowSize; i++) {
    printf(
        "| %8.2f %8.2f %8.2f %8.2f |   "
        "| %8.2f %8.2f %8.2f %8.2f |   "
        "| %8.2f %8.2f %8.2f %8.2f |\n",
        mat1[i],
        mat1[4 + i],
        mat1[8 + i],
        mat1[12 + i],
        mat2[i],
        mat2[4 + i],
        mat2[8 + i],
        mat2[12 + i],
        output[i],
        output[4 + i],
        output[8 + i],
        output[12 + i]);
    if (i == 0 || i == 2) {
      printf(
          "|                                     |   "
          "|                                     |   "
          "|                                     |\n");
    } else if (i == 1) {
      printf(
          "|                                     | x "
          "|                                     | = "
          "|                                     |\n");
    }
  }

  return 0;
}
#else
// Without the simulator there is nothing to test.
int main(void) { return 0; }
#endif // VIXL_INCLUDE_SIMULATOR_AARCH64
#endif // TEST_EXAMPLES
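As a cross-check for the simulator output, here is a scalar sketch of the same operation. ReferenceMatrixMultiply is a hypothetical helper, not part of the example; it assumes the column-major layout described above, with element (row, col) stored at index col * 4 + row:

  // dst = mat1 * mat2, all three 4x4 and column-major.
  void ReferenceMatrixMultiply(float* dst, const float* mat1, const float* mat2) {
    for (int col = 0; col < 4; col++) {
      for (int row = 0; row < 4; row++) {
        float acc = 0.0f;
        // dst(row, col) = sum over k of mat1(row, k) * mat2(k, col).
        for (int k = 0; k < 4; k++) {
          acc += mat1[k * 4 + row] * mat2[col * 4 + k];
        }
        dst[col * 4 + row] = acc;
      }
    }
  }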
146
externals/vixl/vixl/examples/aarch64/non-const-visitor.cc
vendored
Normal file
@@ -0,0 +1,146 @@
// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "non-const-visitor.h"

#include "examples.h"

using namespace vixl;
using namespace vixl::aarch64;

#define __ masm->


void SwitchAddSubRegisterSources::VisitAddSubShifted(const Instruction* instr) {
  int rn = instr->GetRn();
  int rm = instr->GetRm();
  // Only non-const visitors are allowed to discard constness of the visited
  // instruction.
  Instruction* mutable_instr = MutableInstruction(instr);
  Instr instr_bits = mutable_instr->GetInstructionBits();

  // Switch the bitfields for the `rn` and `rm` registers.
  instr_bits &= ~(Rn_mask | Rm_mask);
  instr_bits |= (rn << Rm_offset) | (rm << Rn_offset);

  // Rewrite the instruction.
  mutable_instr->SetInstructionBits(instr_bits);
}


void GenerateNonConstVisitorTestCode(MacroAssembler* masm) {
  // int64_t foo(int64_t a, int64_t b)
  // Argument locations:
  //   a -> x0
  //   b -> x1
  __ Sub(x0, x0, x1);
  // The return value is in x0.
  __ Ret();
}


int64_t RunNonConstVisitorTestGeneratedCode(const Instruction* start_instr) {
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
  Decoder simulator_decoder;
  Simulator simulator(&simulator_decoder);

  int64_t a = 5;
  int64_t b = 2;
  simulator.WriteXRegister(0, a);
  simulator.WriteXRegister(1, b);
  simulator.RunFrom(start_instr);
  int64_t res = simulator.ReadXRegister(0);
  printf("foo(%" PRId64 ", %" PRId64 ") = %" PRId64 "\n", a, b, res);

  return res;
#else
  // Without the simulator there is nothing to test.
  USE(start_instr);
  return 0;
#endif
}


#ifndef TEST_EXAMPLES
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
int main(void) {
  MacroAssembler masm;

  // Generate the code.
  Label code_start, code_end;
  masm.Bind(&code_start);
  GenerateNonConstVisitorTestCode(&masm);
  masm.Bind(&code_end);
  masm.FinalizeCode();
  Instruction* instr_start = masm.GetLabelAddress<Instruction*>(&code_start);
  Instruction* instr_end = masm.GetLabelAddress<Instruction*>(&code_end);

  // Run the code a first time.
  RunNonConstVisitorTestGeneratedCode(instr_start);

  // Instantiate a decoder, disassembler, and our custom modifying visitor.
  Decoder decoder;
  PrintDisassembler disasm(stdout);
  SwitchAddSubRegisterSources modifying_visitor;

  // Register visitors in such a way that when visiting instructions, the
  // decoder will first disassemble the original instruction, modify it, and
  // then disassemble the modified instruction.
  decoder.AppendVisitor(&disasm);
  decoder.AppendVisitor(&modifying_visitor);
  decoder.AppendVisitor(&disasm);

  // Iterate through the instructions.
  Instruction* instr;
  for (instr = instr_start; instr < instr_end; instr += kInstructionSize) {
    printf("---\n");
    decoder.Decode(instr);
  }

  // Run the modified code and observe the different output from before.
  RunNonConstVisitorTestGeneratedCode(instr_start);

  return 0;
}
#else
// Without the simulator there is nothing to test.
int main(void) { return 0; }
#endif // VIXL_INCLUDE_SIMULATOR_AARCH64
#endif // TEST_EXAMPLES


// This is only used by the testing code.
void ModifyNonConstVisitorTestGeneratedCode(Instruction* start,
                                            Instruction* end) {
  Decoder decoder;
  SwitchAddSubRegisterSources modifying_visitor;
  decoder.AppendVisitor(&modifying_visitor);

  Instruction* instr;
  for (instr = start; instr < end; instr += kInstructionSize) {
    printf("---\n");
    decoder.Decode(instr);
  }
}
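A concrete trace of the effect: the generated sub x0, x0, x1 has Rn = 0 and Rm = 1. Swapping the two bitfields yields sub x0, x1, x0, so the first run prints foo(5, 2) = 3 while the run after modification computes 2 - 5 and prints foo(5, 2) = -3.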
56
externals/vixl/vixl/examples/aarch64/non-const-visitor.h
vendored
Normal file
@@ -0,0 +1,56 @@
// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_EXAMPLES_NON_CONST_VISITOR_H_
#define VIXL_EXAMPLES_NON_CONST_VISITOR_H_

#include "aarch64/decoder-aarch64.h"
#include "aarch64/macro-assembler-aarch64.h"

class SwitchAddSubRegisterSources
    : public vixl::aarch64::DecoderVisitorWithDefaults {
 public:
  SwitchAddSubRegisterSources()
      : vixl::aarch64::DecoderVisitorWithDefaults(kNonConstVisitor) {}

  // Our visitor switches the register sources for some add and sub
  // instructions (not all add and sub instructions).
  virtual void VisitAddSubShifted(const vixl::aarch64::Instruction* instr)
      VIXL_OVERRIDE;
};


void GenerateNonConstVisitorTestCode(vixl::aarch64::MacroAssembler* masm);

int64_t RunNonConstVisitorTestGeneratedCode(
    const vixl::aarch64::Instruction* start_instr);

void ModifyNonConstVisitorTestGeneratedCode(vixl::aarch64::Instruction* start,
                                            vixl::aarch64::Instruction* end);


#endif
|
126
externals/vixl/vixl/examples/aarch64/simulated-runtime-calls.cc
vendored
Normal file
126
externals/vixl/vixl/examples/aarch64/simulated-runtime-calls.cc
vendored
Normal file
|
@ -0,0 +1,126 @@
|
|||
// Copyright 2016, VIXL authors
// All rights reserved. (Standard VIXL BSD 3-clause license header, as above.)

#include "examples.h"
|
||||
|
||||
#include "aarch64/disasm-aarch64.h"
|
||||
#include "aarch64/macro-assembler-aarch64.h"
|
||||
#include "aarch64/simulator-aarch64.h"
|
||||
|
||||
using namespace vixl;
|
||||
using namespace vixl::aarch64;
|
||||
|
||||
#ifdef VIXL_HAS_SIMULATED_RUNTIME_CALL_SUPPORT
|
||||
|
||||
#define __ masm->
|
||||
|
||||
int32_t add_int32s(int32_t a, int32_t b) {
|
||||
printf("add_int32s(%d, %d)\n", a, b);
|
||||
return a + b;
|
||||
}
|
||||
|
||||
float int32_to_float(int32_t value) {
|
||||
printf("int32_to_float(%d)\n", value);
|
||||
return static_cast<float>(value);
|
||||
}
|
||||
|
||||
int32_t float_to_int32(float value) {
|
||||
printf("float_to_int32(%f)\n", value);
|
||||
return static_cast<int32_t>(value);
|
||||
}
|
||||
|
||||
void overload_function(float value) {
|
||||
printf("overload_function(float %f)\n", value);
|
||||
}
|
||||
|
||||
void overload_function(int32_t value) {
|
||||
printf("overload_function(int32_t %d)\n", value);
|
||||
}
|
||||
|
||||
void GenerateRuntimeCallExamples(MacroAssembler* masm) {
|
||||
// Preserve lr, since the calls will overwrite it.
|
||||
__ Push(xzr, lr);
|
||||
// The arguments are expected in the appropriate registers (following the
|
||||
// Aarch64 ABI).
|
||||
__ CallRuntime(add_int32s);
|
||||
// The result is in `w0`, as per the ABI.
|
||||
// Of course, one can use assembly to work with the results.
|
||||
__ Lsl(w0, w0, 2);
|
||||
__ CallRuntime(int32_to_float);
|
||||
// The result is in `s0`.
|
||||
// `CallRuntime` can infer template arguments when the call is not ambiguous.
|
||||
// Otherwise the template arguments must be specified in the form:
|
||||
// `__ CallRuntime<return_type, parameter_type_0, parameter_type_1, ...>();`
|
||||
__ CallRuntime<void, float>(overload_function);
|
||||
__ CallRuntime(float_to_int32);
|
||||
__ CallRuntime<void, int32_t>(overload_function);
|
||||
// Restore lr and return.
|
||||
__ Pop(lr, xzr);
|
||||
__ Ret();
|
||||
}
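// Dataflow cross-check for the sequence above, in plain C++ (a sketch, not
// part of the example; `ExpectedRuntimeCallResult` is a hypothetical helper):
// with main()'s inputs a = 1 and b = 2 it yields ((1 + 2) << 2) == 12, which
// survives the float round-trip unchanged.
//
//   int32_t ExpectedRuntimeCallResult(int32_t a, int32_t b) {
//     int32_t shifted = (a + b) << 2;                // add_int32s, then Lsl.
//     float as_float = static_cast<float>(shifted);  // int32_to_float.
//     return static_cast<int32_t>(as_float);         // float_to_int32.
//   }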


#ifndef TEST_EXAMPLES
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64

int main(void) {
  MacroAssembler masm;

  // Generate the code for the example function.
  Label call_runtime_add_floats;
  masm.Bind(&call_runtime_add_floats);
  GenerateRuntimeCallExamples(&masm);
  masm.FinalizeCode();

  Instruction* start =
      masm.GetLabelAddress<Instruction*>(&call_runtime_add_floats);

  // Disassemble the generated code.
  PrintDisassembler disassembler(stdout);
  disassembler.DisassembleBuffer(start, masm.GetSizeOfCodeGenerated());

  Decoder decoder;
  Simulator simulator(&decoder);

  int32_t input_a = 1;
  int32_t input_b = 2;
  simulator.WriteWRegister(0, input_a);
  simulator.WriteWRegister(1, input_b);
  simulator.RunFrom(start);
  printf("The final result is %d\n", simulator.ReadWRegister(0));

  return 0;
}
#else
// TODO: Support running natively.
int main(void) { return 0; }
#endif  // VIXL_INCLUDE_SIMULATOR_AARCH64
#endif  // TEST_EXAMPLES
#else
#ifndef TEST_EXAMPLES
int main(void) { return 0; }
#endif  // TEST_EXAMPLES
#endif  // VIXL_HAS_SIMULATED_RUNTIME_CALL_SUPPORT
95 externals/vixl/vixl/examples/aarch64/sum-array.cc vendored Normal file
@@ -0,0 +1,95 @@
// Copyright 2014, VIXL authors
// All rights reserved. (Standard VIXL BSD 3-clause license header, as above.)

#include "examples.h"
|
||||
|
||||
using namespace vixl;
|
||||
using namespace vixl::aarch64;
|
||||
|
||||
#define ARRAY_SIZE(Array) (sizeof(Array) / sizeof((Array)[0]))
|
||||
#define __ masm->
|
||||
|
||||
void GenerateSumArray(MacroAssembler* masm) {
|
||||
// uint32_t sum_array(uint8_t* array, uint32_t size)
|
||||
// Argument locations:
|
||||
// array (pointer) -> x0
|
||||
// size -> x1
|
||||
|
||||
Label loop, end;
|
||||
|
||||
__ Mov(x2, x0);
|
||||
__ Mov(w0, 0);
|
||||
|
||||
// There's nothing to do if the array is empty.
|
||||
__ Cbz(w1, &end);
|
||||
|
||||
// Go through the array and sum the elements.
|
||||
__ Bind(&loop);
|
||||
|
||||
__ Ldrb(w3, MemOperand(x2, 1, PostIndex)); // w3 = *(x2++)
|
||||
__ Add(w0, w0, w3);
|
||||
|
||||
__ Sub(w1, w1, 1);
|
||||
__ Cbnz(w1, &loop);
|
||||
|
||||
__ Bind(&end);
|
||||
__ Ret();
|
||||
}
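// For reference, a plain-C++ equivalent of the routine generated above (a
// sketch; `sum_array_ref` is a hypothetical standalone helper, not part of
// the patch):
//
//   uint32_t sum_array_ref(const uint8_t* array, uint32_t size) {
//     uint32_t sum = 0;                      // Mov(w0, 0)
//     for (uint32_t i = 0; i < size; i++) {  // Cbz / Cbnz loop
//       sum += array[i];                     // Ldrb + Add
//     }
//     return sum;
//   }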


#ifndef TEST_EXAMPLES
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
int main(void) {
  MacroAssembler masm;
  Decoder decoder;
  Simulator simulator(&decoder);

  // Generate the code for the example function.
  Label sum_array;
  masm.Bind(&sum_array);
  GenerateSumArray(&masm);
  masm.FinalizeCode();

  // Run the example function.
  uint8_t data[] = {2, 45, 63, 7, 245, 38};
  uintptr_t data_addr = reinterpret_cast<uintptr_t>(data);
  simulator.WriteXRegister(0, data_addr);
  simulator.WriteXRegister(1, ARRAY_SIZE(data));
  simulator.RunFrom(masm.GetLabelAddress<Instruction*>(&sum_array));

  unsigned int i;
  for (i = 0; i < ARRAY_SIZE(data) - 1; ++i) {
    printf("%d + ", data[i]);
  }
  printf("%d = %d\n", data[i], simulator.ReadWRegister(0));

  return 0;
}
#else
// Without the simulator there is nothing to test.
int main(void) { return 0; }
#endif  // VIXL_INCLUDE_SIMULATOR_AARCH64
#endif  // TEST_EXAMPLES
117 externals/vixl/vixl/examples/aarch64/sve-strlen.cc vendored Normal file
@@ -0,0 +1,117 @@
// Copyright 2020, VIXL authors
// All rights reserved. (Standard VIXL BSD 3-clause license header, as above.)

#include "examples.h"
|
||||
|
||||
using namespace vixl;
|
||||
using namespace vixl::aarch64;
|
||||
|
||||
#define __ masm->
|
||||
|
||||
// size_t sve_strlen(const char* str);
|
||||
void GenerateSVEStrlen(MacroAssembler* masm) {
|
||||
// We will accumulate the length as we load each chunk.
|
||||
Register len = x1;
|
||||
__ Mov(len, 0);
|
||||
|
||||
// We want to load as much as we can on each iteration, so set up an all-true
|
||||
// predicate for that purpose.
|
||||
PRegister all_true = p0;
|
||||
__ Ptrue(all_true.VnB());
|
||||
|
||||
Label loop;
|
||||
__ Bind(&loop);
|
||||
// FFR is cumulative, so reset it to all-true for each iteration.
|
||||
__ Setffr();
|
||||
|
||||
// Load as many characters as we can from &str[len]. We have to use a NF or FF
|
||||
// load, because we don't know how long the string is. An FF load is a good
|
||||
// choice, because we know that we will see at least a NULL termination, even
|
||||
// for an empty string.
|
||||
__ Ldff1b(z0.VnB(), all_true.Zeroing(), SVEMemOperand(x0, len));
|
||||
// For example, if str = "Test string.", and we load every byte:
|
||||
// z0.b: \0 . g n i r t s t s e T
|
||||
|
||||
// FFR now represents the number of bytes that we actually loaded, so use it
|
||||
// to predicate the data processing instructions.
|
||||
__ Rdffr(p1.VnB());
|
||||
|
||||
// Find the NULL termination (if there is one), and set the flags.
|
||||
__ Cmpeq(p2.VnB(), p1.Zeroing(), z0.VnB(), 0);
|
||||
// p2.b: 1 0 0 0 0 0 0 0 0 0 0 0 0
|
||||
|
||||
// Activate every lane up to (but not including) the NULL termination. If we
|
||||
// found no NULL termination, this activates every lane, and indicates that we
|
||||
// have to load another vector of characters. Lanes activated in this way
|
||||
// represent string characters that we need to count.
|
||||
__ Brkb(p1.VnB(), p1.Zeroing(), p2.VnB());
|
||||
// p1.b: 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1
|
||||
|
||||
// Count the active lanes, and add them to the length count.
|
||||
__ Incp(len, p1.VnB());
|
||||
|
||||
// Loop until `cmpeq` finds a NULL termination.
|
||||
__ B(sve_none, &loop);
|
||||
|
||||
// Put the length in the AAPCS64 return register.
|
||||
__ Mov(x0, len);
|
||||
__ Ret();
|
||||
}
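// Scalar sketch of the loop structure above (illustrative only; each
// iteration of the SVE loop processes one vector of bytes rather than one):
//
//   size_t scalar_strlen(const char* str) {
//     size_t len = 0;             // Mov(len, 0)
//     while (str[len] != '\0') {  // Ldff1b + Cmpeq + Brkb
//       len++;                    // Incp, one lane at a time
//     }
//     return len;                 // Mov(x0, len)
//   }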

#ifndef TEST_EXAMPLES
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
int main(void) {
  MacroAssembler masm;
  Decoder decoder;
  Simulator simulator(&decoder);

  // We're running on the simulator, so we can assume that SVE is present.
  masm.GetCPUFeatures()->Combine(CPUFeatures::kSVE);

  // Generate the code for the example function.
  Label sve_strlen;
  masm.Bind(&sve_strlen);
  GenerateSVEStrlen(&masm);
  masm.FinalizeCode();

  const char* example_input = "This is a string of exactly 42 characters.";
  VIXL_ASSERT(strlen(example_input) == 42);

  simulator.ResetState();
  simulator.WriteXRegister(0, reinterpret_cast<uintptr_t>(example_input));
  simulator.RunFrom(masm.GetLabelAddress<Instruction*>(&sve_strlen));

  printf("strlen(\"%s\") == %" PRIu64 "\n",
         example_input,
         simulator.ReadXRegister(0));

  return 0;
}
#else
// Without the simulator there is nothing to test.
int main(void) { return 0; }
#endif  // VIXL_INCLUDE_SIMULATOR_AARCH64
#endif  // TEST_EXAMPLES
104 externals/vixl/vixl/examples/aarch64/swap-int32.cc vendored Normal file
@@ -0,0 +1,104 @@
// Copyright 2014, VIXL authors
// All rights reserved. (Standard VIXL BSD 3-clause license header, as above.)

#include "examples.h"
|
||||
|
||||
using namespace vixl;
|
||||
using namespace vixl::aarch64;
|
||||
|
||||
#define __ masm->
|
||||
|
||||
void GenerateSwapInt32(MacroAssembler* masm) {
|
||||
{
|
||||
// In this scope the register x2 will be used by the macro-assembler
|
||||
// as the stack pointer (via peek, poke, push, etc).
|
||||
const Register old_stack_pointer = __ StackPointer();
|
||||
__ Mov(x2, __ StackPointer());
|
||||
__ SetStackPointer(x2);
|
||||
|
||||
// This call to Claim is not 16-byte aligned and would have failed
|
||||
// if the current stack pointer was sp.
|
||||
__ Claim(8);
|
||||
|
||||
__ Poke(w0, 0);
|
||||
__ Poke(w1, 4);
|
||||
__ Peek(w1, 0);
|
||||
__ Peek(w0, 4);
|
||||
|
||||
__ Drop(8);
|
||||
|
||||
// Even if we didn't use the system stack pointer, sp might have been
|
||||
// modified because the ABI forbids access to memory below the stack
|
||||
// pointer.
|
||||
__ Mov(old_stack_pointer, __ StackPointer());
|
||||
__ SetStackPointer(old_stack_pointer);
|
||||
}
|
||||
|
||||
// The stack pointer has now been switched back to sp.
|
||||
__ Ret();
|
||||
}
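// The same exchange in plain C++ (a sketch; `swap_int32_model` is a
// hypothetical helper): writing two words and reading them back at swapped
// offsets is what exchanges w0 and w1.
//
//   void swap_int32_model(int32_t* w0, int32_t* w1) {
//     uint8_t stack[8];          // Claim(8)
//     memcpy(stack + 0, w0, 4);  // Poke(w0, 0)
//     memcpy(stack + 4, w1, 4);  // Poke(w1, 4)
//     memcpy(w1, stack + 0, 4);  // Peek(w1, 0)
//     memcpy(w0, stack + 4, 4);  // Peek(w0, 4)
//   }                            // Drop(8)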


#ifndef TEST_EXAMPLES
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
int main(void) {
  MacroAssembler masm;
  Decoder decoder;
  Simulator simulator(&decoder);

  // Generate the code for the example function.
  Label swap_int32;
  masm.Bind(&swap_int32);
  GenerateSwapInt32(&masm);
  masm.FinalizeCode();

  // Run the example function.
  simulator.WriteWRegister(0, 0x11111111);
  simulator.WriteWRegister(1, 0x22222222);

  // clang-format off
  printf("Before swap_int32:\n"
         "x0 = 0x%" PRIx32 "\n"
         "x1 = 0x%" PRIx32 "\n",
         simulator.ReadWRegister(0), simulator.ReadWRegister(1));
  // clang-format on

  simulator.RunFrom(masm.GetLabelAddress<Instruction*>(&swap_int32));

  // clang-format off
  printf("After swap_int32:\n"
         "x0 = 0x%" PRIx32 "\n"
         "x1 = 0x%" PRIx32 "\n",
         simulator.ReadWRegister(0), simulator.ReadWRegister(1));
  // clang-format on

  return 0;
}
#else
// Without the simulator there is nothing to test.
int main(void) { return 0; }
#endif  // VIXL_INCLUDE_SIMULATOR_AARCH64
#endif  // TEST_EXAMPLES
98 externals/vixl/vixl/examples/aarch64/swap4.cc vendored Normal file
@@ -0,0 +1,98 @@
// Copyright 2014, VIXL authors
// All rights reserved. (Standard VIXL BSD 3-clause license header, as above.)

#include "examples.h"
|
||||
|
||||
using namespace vixl;
|
||||
using namespace vixl::aarch64;
|
||||
|
||||
#define __ masm->
|
||||
|
||||
void GenerateSwap4(MacroAssembler* masm) {
|
||||
// VIXL's macro assembler provides some functions to manipulate the stack.
|
||||
// This example shows some of these functions.
|
||||
__ Claim(16);
|
||||
__ Poke(x0, 0);
|
||||
__ Poke(x1, 8);
|
||||
__ Push(x3, x2);
|
||||
|
||||
__ Pop(x1, x0);
|
||||
__ Peek(x3, 0);
|
||||
__ Peek(x2, 8);
|
||||
__ Drop(16);
|
||||
|
||||
__ Ret();
|
||||
}
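// Byte-buffer model of the sequence above (a sketch). The layout assumed for
// Push(x3, x2) -- x2 at the lower address, x3 above it -- is an assumption
// about VIXL's Push/Pop ordering, so treat the resulting permutation as
// illustrative rather than definitive:
//
//   uint8_t stack[32];  // sp starts at stack + 32.
//   // Claim(16): sp = stack + 16.
//   // Poke(x0, 0) -> stack + 16;  Poke(x1, 8) -> stack + 24.
//   // Push(x3, x2): x2 -> stack + 0, x3 -> stack + 8 (assumed).
//   // Pop(x1, x0): x1 <- stack + 8, x0 <- stack + 0 (assumed inverse).
//   // Peek(x3, 0) <- stack + 16;  Peek(x2, 8) <- stack + 24.
//   // Drop(16): sp back to stack + 32.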


#ifndef TEST_EXAMPLES
#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
int main(void) {
  MacroAssembler masm;
  Decoder decoder;
  Simulator simulator(&decoder);

  // Generate the code for the example function.
  Label swap4;
  masm.Bind(&swap4);
  GenerateSwap4(&masm);
  masm.FinalizeCode();

  // Run the example function.
  simulator.WriteXRegister(0, 0x1111111111111111);
  simulator.WriteXRegister(1, 0x2222222222222222);
  simulator.WriteXRegister(2, 0x3333333333333333);
  simulator.WriteXRegister(3, 0x4444444444444444);

  // clang-format off
  printf("Before swap4:\n"
         "x0 = 0x%" PRIx64 "\n"
         "x1 = 0x%" PRIx64 "\n"
         "x2 = 0x%" PRIx64 "\n"
         "x3 = 0x%" PRIx64 "\n",
         simulator.ReadXRegister(0), simulator.ReadXRegister(1),
         simulator.ReadXRegister(2), simulator.ReadXRegister(3));
  // clang-format on

  simulator.RunFrom(masm.GetLabelAddress<Instruction*>(&swap4));

  // clang-format off
  printf("After swap4:\n"
         "x0 = 0x%" PRIx64 "\n"
         "x1 = 0x%" PRIx64 "\n"
         "x2 = 0x%" PRIx64 "\n"
         "x3 = 0x%" PRIx64 "\n",
         simulator.ReadXRegister(0), simulator.ReadXRegister(1),
         simulator.ReadXRegister(2), simulator.ReadXRegister(3));
  // clang-format on

  return 0;
}
#else
// Without the simulator there is nothing to test.
int main(void) { return 0; }
#endif  // VIXL_INCLUDE_SIMULATOR_AARCH64
#endif  // TEST_EXAMPLES
27923 externals/vixl/vixl/src/aarch32/assembler-aarch32.cc vendored Normal file
File diff suppressed because it is too large.
6159 externals/vixl/vixl/src/aarch32/assembler-aarch32.h vendored Normal file
File diff suppressed because it is too large.
855 externals/vixl/vixl/src/aarch32/constants-aarch32.cc vendored Normal file
@@ -0,0 +1,855 @@
// Copyright 2016, VIXL authors
// All rights reserved. (Standard VIXL BSD 3-clause license header, as above.)

#include "aarch32/constants-aarch32.h"
|
||||
#include "utils-vixl.h"
|
||||
|
||||
namespace vixl {
|
||||
namespace aarch32 {
|
||||
|
||||
// Start of generated code.
|
||||
const char* ToCString(InstructionType type) {
|
||||
switch (type) {
|
||||
case kAdc:
|
||||
return "adc";
|
||||
case kAdcs:
|
||||
return "adcs";
|
||||
case kAdd:
|
||||
return "add";
|
||||
case kAdds:
|
||||
return "adds";
|
||||
case kAddw:
|
||||
return "addw";
|
||||
case kAdr:
|
||||
return "adr";
|
||||
case kAnd:
|
||||
return "and";
|
||||
case kAnds:
|
||||
return "ands";
|
||||
case kAsr:
|
||||
return "asr";
|
||||
case kAsrs:
|
||||
return "asrs";
|
||||
case kB:
|
||||
return "b";
|
||||
case kBfc:
|
||||
return "bfc";
|
||||
case kBfi:
|
||||
return "bfi";
|
||||
case kBic:
|
||||
return "bic";
|
||||
case kBics:
|
||||
return "bics";
|
||||
case kBkpt:
|
||||
return "bkpt";
|
||||
case kBl:
|
||||
return "bl";
|
||||
case kBlx:
|
||||
return "blx";
|
||||
case kBx:
|
||||
return "bx";
|
||||
case kBxj:
|
||||
return "bxj";
|
||||
case kCbnz:
|
||||
return "cbnz";
|
||||
case kCbz:
|
||||
return "cbz";
|
||||
case kClrex:
|
||||
return "clrex";
|
||||
case kClz:
|
||||
return "clz";
|
||||
case kCmn:
|
||||
return "cmn";
|
||||
case kCmp:
|
||||
return "cmp";
|
||||
case kCrc32b:
|
||||
return "crc32b";
|
||||
case kCrc32cb:
|
||||
return "crc32cb";
|
||||
case kCrc32ch:
|
||||
return "crc32ch";
|
||||
case kCrc32cw:
|
||||
return "crc32cw";
|
||||
case kCrc32h:
|
||||
return "crc32h";
|
||||
case kCrc32w:
|
||||
return "crc32w";
|
||||
case kDmb:
|
||||
return "dmb";
|
||||
case kDsb:
|
||||
return "dsb";
|
||||
case kEor:
|
||||
return "eor";
|
||||
case kEors:
|
||||
return "eors";
|
||||
case kFldmdbx:
|
||||
return "fldmdbx";
|
||||
case kFldmiax:
|
||||
return "fldmiax";
|
||||
case kFstmdbx:
|
||||
return "fstmdbx";
|
||||
case kFstmiax:
|
||||
return "fstmiax";
|
||||
case kHlt:
|
||||
return "hlt";
|
||||
case kHvc:
|
||||
return "hvc";
|
||||
case kIsb:
|
||||
return "isb";
|
||||
case kIt:
|
||||
return "it";
|
||||
case kLda:
|
||||
return "lda";
|
||||
case kLdab:
|
||||
return "ldab";
|
||||
case kLdaex:
|
||||
return "ldaex";
|
||||
case kLdaexb:
|
||||
return "ldaexb";
|
||||
case kLdaexd:
|
||||
return "ldaexd";
|
||||
case kLdaexh:
|
||||
return "ldaexh";
|
||||
case kLdah:
|
||||
return "ldah";
|
||||
case kLdm:
|
||||
return "ldm";
|
||||
case kLdmda:
|
||||
return "ldmda";
|
||||
case kLdmdb:
|
||||
return "ldmdb";
|
||||
case kLdmea:
|
||||
return "ldmea";
|
||||
case kLdmed:
|
||||
return "ldmed";
|
||||
case kLdmfa:
|
||||
return "ldmfa";
|
||||
case kLdmfd:
|
||||
return "ldmfd";
|
||||
case kLdmib:
|
||||
return "ldmib";
|
||||
case kLdr:
|
||||
return "ldr";
|
||||
case kLdrb:
|
||||
return "ldrb";
|
||||
case kLdrd:
|
||||
return "ldrd";
|
||||
case kLdrex:
|
||||
return "ldrex";
|
||||
case kLdrexb:
|
||||
return "ldrexb";
|
||||
case kLdrexd:
|
||||
return "ldrexd";
|
||||
case kLdrexh:
|
||||
return "ldrexh";
|
||||
case kLdrh:
|
||||
return "ldrh";
|
||||
case kLdrsb:
|
||||
return "ldrsb";
|
||||
case kLdrsh:
|
||||
return "ldrsh";
|
||||
case kLsl:
|
||||
return "lsl";
|
||||
case kLsls:
|
||||
return "lsls";
|
||||
case kLsr:
|
||||
return "lsr";
|
||||
case kLsrs:
|
||||
return "lsrs";
|
||||
case kMla:
|
||||
return "mla";
|
||||
case kMlas:
|
||||
return "mlas";
|
||||
case kMls:
|
||||
return "mls";
|
||||
case kMov:
|
||||
return "mov";
|
||||
case kMovs:
|
||||
return "movs";
|
||||
case kMovt:
|
||||
return "movt";
|
||||
case kMovw:
|
||||
return "movw";
|
||||
case kMrs:
|
||||
return "mrs";
|
||||
case kMsr:
|
||||
return "msr";
|
||||
case kMul:
|
||||
return "mul";
|
||||
case kMuls:
|
||||
return "muls";
|
||||
case kMvn:
|
||||
return "mvn";
|
||||
case kMvns:
|
||||
return "mvns";
|
||||
case kNop:
|
||||
return "nop";
|
||||
case kOrn:
|
||||
return "orn";
|
||||
case kOrns:
|
||||
return "orns";
|
||||
case kOrr:
|
||||
return "orr";
|
||||
case kOrrs:
|
||||
return "orrs";
|
||||
case kPkhbt:
|
||||
return "pkhbt";
|
||||
case kPkhtb:
|
||||
return "pkhtb";
|
||||
case kPld:
|
||||
return "pld";
|
||||
case kPldw:
|
||||
return "pldw";
|
||||
case kPli:
|
||||
return "pli";
|
||||
case kPop:
|
||||
return "pop";
|
||||
case kPush:
|
||||
return "push";
|
||||
case kQadd:
|
||||
return "qadd";
|
||||
case kQadd16:
|
||||
return "qadd16";
|
||||
case kQadd8:
|
||||
return "qadd8";
|
||||
case kQasx:
|
||||
return "qasx";
|
||||
case kQdadd:
|
||||
return "qdadd";
|
||||
case kQdsub:
|
||||
return "qdsub";
|
||||
case kQsax:
|
||||
return "qsax";
|
||||
case kQsub:
|
||||
return "qsub";
|
||||
case kQsub16:
|
||||
return "qsub16";
|
||||
case kQsub8:
|
||||
return "qsub8";
|
||||
case kRbit:
|
||||
return "rbit";
|
||||
case kRev:
|
||||
return "rev";
|
||||
case kRev16:
|
||||
return "rev16";
|
||||
case kRevsh:
|
||||
return "revsh";
|
||||
case kRor:
|
||||
return "ror";
|
||||
case kRors:
|
||||
return "rors";
|
||||
case kRrx:
|
||||
return "rrx";
|
||||
case kRrxs:
|
||||
return "rrxs";
|
||||
case kRsb:
|
||||
return "rsb";
|
||||
case kRsbs:
|
||||
return "rsbs";
|
||||
case kRsc:
|
||||
return "rsc";
|
||||
case kRscs:
|
||||
return "rscs";
|
||||
case kSadd16:
|
||||
return "sadd16";
|
||||
case kSadd8:
|
||||
return "sadd8";
|
||||
case kSasx:
|
||||
return "sasx";
|
||||
case kSbc:
|
||||
return "sbc";
|
||||
case kSbcs:
|
||||
return "sbcs";
|
||||
case kSbfx:
|
||||
return "sbfx";
|
||||
case kSdiv:
|
||||
return "sdiv";
|
||||
case kSel:
|
||||
return "sel";
|
||||
case kShadd16:
|
||||
return "shadd16";
|
||||
case kShadd8:
|
||||
return "shadd8";
|
||||
case kShasx:
|
||||
return "shasx";
|
||||
case kShsax:
|
||||
return "shsax";
|
||||
case kShsub16:
|
||||
return "shsub16";
|
||||
case kShsub8:
|
||||
return "shsub8";
|
||||
case kSmlabb:
|
||||
return "smlabb";
|
||||
case kSmlabt:
|
||||
return "smlabt";
|
||||
case kSmlad:
|
||||
return "smlad";
|
||||
case kSmladx:
|
||||
return "smladx";
|
||||
case kSmlal:
|
||||
return "smlal";
|
||||
case kSmlalbb:
|
||||
return "smlalbb";
|
||||
case kSmlalbt:
|
||||
return "smlalbt";
|
||||
case kSmlald:
|
||||
return "smlald";
|
||||
case kSmlaldx:
|
||||
return "smlaldx";
|
||||
case kSmlals:
|
||||
return "smlals";
|
||||
case kSmlaltb:
|
||||
return "smlaltb";
|
||||
case kSmlaltt:
|
||||
return "smlaltt";
|
||||
case kSmlatb:
|
||||
return "smlatb";
|
||||
case kSmlatt:
|
||||
return "smlatt";
|
||||
case kSmlawb:
|
||||
return "smlawb";
|
||||
case kSmlawt:
|
||||
return "smlawt";
|
||||
case kSmlsd:
|
||||
return "smlsd";
|
||||
case kSmlsdx:
|
||||
return "smlsdx";
|
||||
case kSmlsld:
|
||||
return "smlsld";
|
||||
case kSmlsldx:
|
||||
return "smlsldx";
|
||||
case kSmmla:
|
||||
return "smmla";
|
||||
case kSmmlar:
|
||||
return "smmlar";
|
||||
case kSmmls:
|
||||
return "smmls";
|
||||
case kSmmlsr:
|
||||
return "smmlsr";
|
||||
case kSmmul:
|
||||
return "smmul";
|
||||
case kSmmulr:
|
||||
return "smmulr";
|
||||
case kSmuad:
|
||||
return "smuad";
|
||||
case kSmuadx:
|
||||
return "smuadx";
|
||||
case kSmulbb:
|
||||
return "smulbb";
|
||||
case kSmulbt:
|
||||
return "smulbt";
|
||||
case kSmull:
|
||||
return "smull";
|
||||
case kSmulls:
|
||||
return "smulls";
|
||||
case kSmultb:
|
||||
return "smultb";
|
||||
case kSmultt:
|
||||
return "smultt";
|
||||
case kSmulwb:
|
||||
return "smulwb";
|
||||
case kSmulwt:
|
||||
return "smulwt";
|
||||
case kSmusd:
|
||||
return "smusd";
|
||||
case kSmusdx:
|
||||
return "smusdx";
|
||||
case kSsat:
|
||||
return "ssat";
|
||||
case kSsat16:
|
||||
return "ssat16";
|
||||
case kSsax:
|
||||
return "ssax";
|
||||
case kSsub16:
|
||||
return "ssub16";
|
||||
case kSsub8:
|
||||
return "ssub8";
|
||||
case kStl:
|
||||
return "stl";
|
||||
case kStlb:
|
||||
return "stlb";
|
||||
case kStlex:
|
||||
return "stlex";
|
||||
case kStlexb:
|
||||
return "stlexb";
|
||||
case kStlexd:
|
||||
return "stlexd";
|
||||
case kStlexh:
|
||||
return "stlexh";
|
||||
case kStlh:
|
||||
return "stlh";
|
||||
case kStm:
|
||||
return "stm";
|
||||
case kStmda:
|
||||
return "stmda";
|
||||
case kStmdb:
|
||||
return "stmdb";
|
||||
case kStmea:
|
||||
return "stmea";
|
||||
case kStmed:
|
||||
return "stmed";
|
||||
case kStmfa:
|
||||
return "stmfa";
|
||||
case kStmfd:
|
||||
return "stmfd";
|
||||
case kStmib:
|
||||
return "stmib";
|
||||
case kStr:
|
||||
return "str";
|
||||
case kStrb:
|
||||
return "strb";
|
||||
case kStrd:
|
||||
return "strd";
|
||||
case kStrex:
|
||||
return "strex";
|
||||
case kStrexb:
|
||||
return "strexb";
|
||||
case kStrexd:
|
||||
return "strexd";
|
||||
case kStrexh:
|
||||
return "strexh";
|
||||
case kStrh:
|
||||
return "strh";
|
||||
case kSub:
|
||||
return "sub";
|
||||
case kSubs:
|
||||
return "subs";
|
||||
case kSubw:
|
||||
return "subw";
|
||||
case kSvc:
|
||||
return "svc";
|
||||
case kSxtab:
|
||||
return "sxtab";
|
||||
case kSxtab16:
|
||||
return "sxtab16";
|
||||
case kSxtah:
|
||||
return "sxtah";
|
||||
case kSxtb:
|
||||
return "sxtb";
|
||||
case kSxtb16:
|
||||
return "sxtb16";
|
||||
case kSxth:
|
||||
return "sxth";
|
||||
case kTbb:
|
||||
return "tbb";
|
||||
case kTbh:
|
||||
return "tbh";
|
||||
case kTeq:
|
||||
return "teq";
|
||||
case kTst:
|
||||
return "tst";
|
||||
case kUadd16:
|
||||
return "uadd16";
|
||||
case kUadd8:
|
||||
return "uadd8";
|
||||
case kUasx:
|
||||
return "uasx";
|
||||
case kUbfx:
|
||||
return "ubfx";
|
||||
case kUdf:
|
||||
return "udf";
|
||||
case kUdiv:
|
||||
return "udiv";
|
||||
case kUhadd16:
|
||||
return "uhadd16";
|
||||
case kUhadd8:
|
||||
return "uhadd8";
|
||||
case kUhasx:
|
||||
return "uhasx";
|
||||
case kUhsax:
|
||||
return "uhsax";
|
||||
case kUhsub16:
|
||||
return "uhsub16";
|
||||
case kUhsub8:
|
||||
return "uhsub8";
|
||||
case kUmaal:
|
||||
return "umaal";
|
||||
case kUmlal:
|
||||
return "umlal";
|
||||
case kUmlals:
|
||||
return "umlals";
|
||||
case kUmull:
|
||||
return "umull";
|
||||
case kUmulls:
|
||||
return "umulls";
|
||||
case kUqadd16:
|
||||
return "uqadd16";
|
||||
case kUqadd8:
|
||||
return "uqadd8";
|
||||
case kUqasx:
|
||||
return "uqasx";
|
||||
case kUqsax:
|
||||
return "uqsax";
|
||||
case kUqsub16:
|
||||
return "uqsub16";
|
||||
case kUqsub8:
|
||||
return "uqsub8";
|
||||
case kUsad8:
|
||||
return "usad8";
|
||||
case kUsada8:
|
||||
return "usada8";
|
||||
case kUsat:
|
||||
return "usat";
|
||||
case kUsat16:
|
||||
return "usat16";
|
||||
case kUsax:
|
||||
return "usax";
|
||||
case kUsub16:
|
||||
return "usub16";
|
||||
case kUsub8:
|
||||
return "usub8";
|
||||
case kUxtab:
|
||||
return "uxtab";
|
||||
case kUxtab16:
|
||||
return "uxtab16";
|
||||
case kUxtah:
|
||||
return "uxtah";
|
||||
case kUxtb:
|
||||
return "uxtb";
|
||||
case kUxtb16:
|
||||
return "uxtb16";
|
||||
case kUxth:
|
||||
return "uxth";
|
||||
case kVaba:
|
||||
return "vaba";
|
||||
case kVabal:
|
||||
return "vabal";
|
||||
case kVabd:
|
||||
return "vabd";
|
||||
case kVabdl:
|
||||
return "vabdl";
|
||||
case kVabs:
|
||||
return "vabs";
|
||||
case kVacge:
|
||||
return "vacge";
|
||||
case kVacgt:
|
||||
return "vacgt";
|
||||
case kVacle:
|
||||
return "vacle";
|
||||
case kVaclt:
|
||||
return "vaclt";
|
||||
case kVadd:
|
||||
return "vadd";
|
||||
case kVaddhn:
|
||||
return "vaddhn";
|
||||
case kVaddl:
|
||||
return "vaddl";
|
||||
case kVaddw:
|
||||
return "vaddw";
|
||||
case kVand:
|
||||
return "vand";
|
||||
case kVbic:
|
||||
return "vbic";
|
||||
case kVbif:
|
||||
return "vbif";
|
||||
case kVbit:
|
||||
return "vbit";
|
||||
case kVbsl:
|
||||
return "vbsl";
|
||||
case kVceq:
|
||||
return "vceq";
|
||||
case kVcge:
|
||||
return "vcge";
|
||||
case kVcgt:
|
||||
return "vcgt";
|
||||
case kVcle:
|
||||
return "vcle";
|
||||
case kVcls:
|
||||
return "vcls";
|
||||
case kVclt:
|
||||
return "vclt";
|
||||
case kVclz:
|
||||
return "vclz";
|
||||
case kVcmp:
|
||||
return "vcmp";
|
||||
case kVcmpe:
|
||||
return "vcmpe";
|
||||
case kVcnt:
|
||||
return "vcnt";
|
||||
case kVcvt:
|
||||
return "vcvt";
|
||||
case kVcvta:
|
||||
return "vcvta";
|
||||
case kVcvtb:
|
||||
return "vcvtb";
|
||||
case kVcvtm:
|
||||
return "vcvtm";
|
||||
case kVcvtn:
|
||||
return "vcvtn";
|
||||
case kVcvtp:
|
||||
return "vcvtp";
|
||||
case kVcvtr:
|
||||
return "vcvtr";
|
||||
case kVcvtt:
|
||||
return "vcvtt";
|
||||
case kVdiv:
|
||||
return "vdiv";
|
||||
case kVdup:
|
||||
return "vdup";
|
||||
case kVeor:
|
||||
return "veor";
|
||||
case kVext:
|
||||
return "vext";
|
||||
case kVfma:
|
||||
return "vfma";
|
||||
case kVfms:
|
||||
return "vfms";
|
||||
case kVfnma:
|
||||
return "vfnma";
|
||||
case kVfnms:
|
||||
return "vfnms";
|
||||
case kVhadd:
|
||||
return "vhadd";
|
||||
case kVhsub:
|
||||
return "vhsub";
|
||||
case kVld1:
|
||||
return "vld1";
|
||||
case kVld2:
|
||||
return "vld2";
|
||||
case kVld3:
|
||||
return "vld3";
|
||||
case kVld4:
|
||||
return "vld4";
|
||||
case kVldm:
|
||||
return "vldm";
|
||||
case kVldmdb:
|
||||
return "vldmdb";
|
||||
case kVldmia:
|
||||
return "vldmia";
|
||||
case kVldr:
|
||||
return "vldr";
|
||||
case kVmax:
|
||||
return "vmax";
|
||||
case kVmaxnm:
|
||||
return "vmaxnm";
|
||||
case kVmin:
|
||||
return "vmin";
|
||||
case kVminnm:
|
||||
return "vminnm";
|
||||
case kVmla:
|
||||
return "vmla";
|
||||
case kVmlal:
|
||||
return "vmlal";
|
||||
case kVmls:
|
||||
return "vmls";
|
||||
case kVmlsl:
|
||||
return "vmlsl";
|
||||
case kVmov:
|
||||
return "vmov";
|
||||
case kVmovl:
|
||||
return "vmovl";
|
||||
case kVmovn:
|
||||
return "vmovn";
|
||||
case kVmrs:
|
||||
return "vmrs";
|
||||
case kVmsr:
|
||||
return "vmsr";
|
||||
case kVmul:
|
||||
return "vmul";
|
||||
case kVmull:
|
||||
return "vmull";
|
||||
case kVmvn:
|
||||
return "vmvn";
|
||||
case kVneg:
|
||||
return "vneg";
|
||||
case kVnmla:
|
||||
return "vnmla";
|
||||
case kVnmls:
|
||||
return "vnmls";
|
||||
case kVnmul:
|
||||
return "vnmul";
|
||||
case kVorn:
|
||||
return "vorn";
|
||||
case kVorr:
|
||||
return "vorr";
|
||||
case kVpadal:
|
||||
return "vpadal";
|
||||
case kVpadd:
|
||||
return "vpadd";
|
||||
case kVpaddl:
|
||||
return "vpaddl";
|
||||
case kVpmax:
|
||||
return "vpmax";
|
||||
case kVpmin:
|
||||
return "vpmin";
|
||||
case kVpop:
|
||||
return "vpop";
|
||||
case kVpush:
|
||||
return "vpush";
|
||||
case kVqabs:
|
||||
return "vqabs";
|
||||
case kVqadd:
|
||||
return "vqadd";
|
||||
case kVqdmlal:
|
||||
return "vqdmlal";
|
||||
case kVqdmlsl:
|
||||
return "vqdmlsl";
|
||||
case kVqdmulh:
|
||||
return "vqdmulh";
|
||||
case kVqdmull:
|
||||
return "vqdmull";
|
||||
case kVqmovn:
|
||||
return "vqmovn";
|
||||
case kVqmovun:
|
||||
return "vqmovun";
|
||||
case kVqneg:
|
||||
return "vqneg";
|
||||
case kVqrdmulh:
|
||||
return "vqrdmulh";
|
||||
case kVqrshl:
|
||||
return "vqrshl";
|
||||
case kVqrshrn:
|
||||
return "vqrshrn";
|
||||
case kVqrshrun:
|
||||
return "vqrshrun";
|
||||
case kVqshl:
|
||||
return "vqshl";
|
||||
case kVqshlu:
|
||||
return "vqshlu";
|
||||
case kVqshrn:
|
||||
return "vqshrn";
|
||||
case kVqshrun:
|
||||
return "vqshrun";
|
||||
case kVqsub:
|
||||
return "vqsub";
|
||||
case kVraddhn:
|
||||
return "vraddhn";
|
||||
case kVrecpe:
|
||||
return "vrecpe";
|
||||
case kVrecps:
|
||||
return "vrecps";
|
||||
case kVrev16:
|
||||
return "vrev16";
|
||||
case kVrev32:
|
||||
return "vrev32";
|
||||
case kVrev64:
|
||||
return "vrev64";
|
||||
case kVrhadd:
|
||||
return "vrhadd";
|
||||
case kVrinta:
|
||||
return "vrinta";
|
||||
case kVrintm:
|
||||
return "vrintm";
|
||||
case kVrintn:
|
||||
return "vrintn";
|
||||
case kVrintp:
|
||||
return "vrintp";
|
||||
case kVrintr:
|
||||
return "vrintr";
|
||||
case kVrintx:
|
||||
return "vrintx";
|
||||
case kVrintz:
|
||||
return "vrintz";
|
||||
case kVrshl:
|
||||
return "vrshl";
|
||||
case kVrshr:
|
||||
return "vrshr";
|
||||
case kVrshrn:
|
||||
return "vrshrn";
|
||||
case kVrsqrte:
|
||||
return "vrsqrte";
|
||||
case kVrsqrts:
|
||||
return "vrsqrts";
|
||||
case kVrsra:
|
||||
return "vrsra";
|
||||
case kVrsubhn:
|
||||
return "vrsubhn";
|
||||
case kVseleq:
|
||||
return "vseleq";
|
||||
case kVselge:
|
||||
return "vselge";
|
||||
case kVselgt:
|
||||
return "vselgt";
|
||||
case kVselvs:
|
||||
return "vselvs";
|
||||
case kVshl:
|
||||
return "vshl";
|
||||
case kVshll:
|
||||
return "vshll";
|
||||
case kVshr:
|
||||
return "vshr";
|
||||
case kVshrn:
|
||||
return "vshrn";
|
||||
case kVsli:
|
||||
return "vsli";
|
||||
case kVsqrt:
|
||||
return "vsqrt";
|
||||
case kVsra:
|
||||
return "vsra";
|
||||
case kVsri:
|
||||
return "vsri";
|
||||
case kVst1:
|
||||
return "vst1";
|
||||
case kVst2:
|
||||
return "vst2";
|
||||
case kVst3:
|
||||
return "vst3";
|
||||
case kVst4:
|
||||
return "vst4";
|
||||
case kVstm:
|
||||
return "vstm";
|
||||
case kVstmdb:
|
||||
return "vstmdb";
|
||||
case kVstmia:
|
||||
return "vstmia";
|
||||
case kVstr:
|
||||
return "vstr";
|
||||
case kVsub:
|
||||
return "vsub";
|
||||
case kVsubhn:
|
||||
return "vsubhn";
|
||||
case kVsubl:
|
||||
return "vsubl";
|
||||
case kVsubw:
|
||||
return "vsubw";
|
||||
case kVswp:
|
||||
return "vswp";
|
||||
case kVtbl:
|
||||
return "vtbl";
|
||||
case kVtbx:
|
||||
return "vtbx";
|
||||
case kVtrn:
|
||||
return "vtrn";
|
||||
case kVtst:
|
||||
return "vtst";
|
||||
case kVuzp:
|
||||
return "vuzp";
|
||||
case kVzip:
|
||||
return "vzip";
|
||||
case kYield:
|
||||
return "yield";
|
||||
case kUndefInstructionType:
|
||||
VIXL_UNREACHABLE();
|
||||
return "";
|
||||
}
|
||||
VIXL_UNREACHABLE();
|
||||
return "";
|
||||
} // NOLINT(readability/fn_size)
|
||||
// End of generated code.
|
||||
|
||||
} // namespace aarch32
|
||||
} // namespace vixl
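// Trivial usage sketch for the generated mapping above:
//
//   #include <cstdio>
//   #include "aarch32/constants-aarch32.h"
//
//   int main() {
//     printf("%s\n", vixl::aarch32::ToCString(vixl::aarch32::kAdds));  // "adds"
//     return 0;
//   }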
541 externals/vixl/vixl/src/aarch32/constants-aarch32.h vendored Normal file
@@ -0,0 +1,541 @@
// Copyright 2015, VIXL authors
// All rights reserved. (Standard VIXL BSD 3-clause license header, as above.)

#ifndef VIXL_CONSTANTS_AARCH32_H_
#define VIXL_CONSTANTS_AARCH32_H_

extern "C" {
#include <stdint.h>
}

#include "globals-vixl.h"


namespace vixl {
namespace aarch32 {

enum InstructionSet { A32, T32 };
#ifdef VIXL_INCLUDE_TARGET_T32_ONLY
const InstructionSet kDefaultISA = T32;
#else
const InstructionSet kDefaultISA = A32;
#endif

const unsigned kRegSizeInBits = 32;
const unsigned kRegSizeInBytes = kRegSizeInBits / 8;
const unsigned kSRegSizeInBits = 32;
const unsigned kSRegSizeInBytes = kSRegSizeInBits / 8;
const unsigned kDRegSizeInBits = 64;
const unsigned kDRegSizeInBytes = kDRegSizeInBits / 8;
const unsigned kQRegSizeInBits = 128;
const unsigned kQRegSizeInBytes = kQRegSizeInBits / 8;

const unsigned kNumberOfRegisters = 16;
const unsigned kNumberOfSRegisters = 32;
const unsigned kMaxNumberOfDRegisters = 32;
const unsigned kNumberOfQRegisters = 16;
const unsigned kNumberOfT32LowRegisters = 8;

const unsigned kIpCode = 12;
const unsigned kSpCode = 13;
const unsigned kLrCode = 14;
const unsigned kPcCode = 15;

const unsigned kT32PcDelta = 4;
const unsigned kA32PcDelta = 8;
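// (These deltas reflect the architectural PC offset on reads: an instruction
// reading PC sees its own address plus 4 in T32 state and plus 8 in A32
// state.)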

const unsigned kRRXEncodedValue = 3;

const unsigned kCoprocMask = 0xe;
const unsigned kInvalidCoprocMask = 0xa;

const unsigned kLowestT32_32Opcode = 0xe8000000;

const uint32_t kUnknownValue = 0xdeadbeef;

const uint32_t kMaxInstructionSizeInBytes = 4;
const uint32_t kA32InstructionSizeInBytes = 4;
const uint32_t k32BitT32InstructionSizeInBytes = 4;
const uint32_t k16BitT32InstructionSizeInBytes = 2;

// Maximum size emitted by a single T32 unconditional macro-instruction.
const uint32_t kMaxT32MacroInstructionSizeInBytes = 32;

const uint32_t kCallerSavedRegistersMask = 0x500f;

const uint16_t k16BitT32NopOpcode = 0xbf00;
const uint16_t kCbzCbnzMask = 0xf500;
const uint16_t kCbzCbnzValue = 0xb100;
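// Sketch: the mask/value pair above identifies 16-bit T32 CBZ/CBNZ encodings
// in the usual way (`IsCbzCbnz` is a hypothetical helper):
//
//   bool IsCbzCbnz(uint16_t instr) {
//     return (instr & vixl::aarch32::kCbzCbnzMask) ==
//            vixl::aarch32::kCbzCbnzValue;
//   }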

const int32_t kCbzCbnzRange = 126;
const int32_t kBConditionalNarrowRange = 254;
const int32_t kBNarrowRange = 2046;
const int32_t kNearLabelRange = kBNarrowRange;

enum SystemFunctionsOpcodes { kPrintfCode };

enum BranchHint { kNear, kFar, kBranchWithoutHint };

// Start of generated code.
// AArch32 version implemented by the library (v8.0).
// The encoding for vX.Y is: (X << 8) | Y.
#define AARCH32_VERSION 0x0800

enum InstructionAttribute {
  kNoAttribute = 0,
  kArithmetic = 0x1,
  kBitwise = 0x2,
  kShift = 0x4,
  kAddress = 0x8,
  kBranch = 0x10,
  kSystem = 0x20,
  kFpNeon = 0x40,
  kLoadStore = 0x80,
  kLoadStoreMultiple = 0x100
};

enum InstructionType {
  kUndefInstructionType,
  kAdc, kAdcs, kAdd, kAdds, kAddw, kAdr, kAnd, kAnds, kAsr, kAsrs, kB, kBfc,
  kBfi, kBic, kBics, kBkpt, kBl, kBlx, kBx, kBxj, kCbnz, kCbz, kClrex, kClz,
  kCmn, kCmp, kCrc32b, kCrc32cb, kCrc32ch, kCrc32cw, kCrc32h, kCrc32w, kDmb,
  kDsb, kEor, kEors, kFldmdbx, kFldmiax, kFstmdbx, kFstmiax, kHlt, kHvc, kIsb,
  kIt, kLda, kLdab, kLdaex, kLdaexb, kLdaexd, kLdaexh, kLdah, kLdm, kLdmda,
  kLdmdb, kLdmea, kLdmed, kLdmfa, kLdmfd, kLdmib, kLdr, kLdrb, kLdrd, kLdrex,
  kLdrexb, kLdrexd, kLdrexh, kLdrh, kLdrsb, kLdrsh, kLsl, kLsls, kLsr, kLsrs,
  kMla, kMlas, kMls, kMov, kMovs, kMovt, kMovw, kMrs, kMsr, kMul, kMuls, kMvn,
  kMvns, kNop, kOrn, kOrns, kOrr, kOrrs, kPkhbt, kPkhtb, kPld, kPldw, kPli,
  kPop, kPush, kQadd, kQadd16, kQadd8, kQasx, kQdadd, kQdsub, kQsax, kQsub,
  kQsub16, kQsub8, kRbit, kRev, kRev16, kRevsh, kRor, kRors, kRrx, kRrxs,
  kRsb, kRsbs, kRsc, kRscs, kSadd16, kSadd8, kSasx, kSbc, kSbcs, kSbfx, kSdiv,
  kSel, kShadd16, kShadd8, kShasx, kShsax, kShsub16, kShsub8, kSmlabb,
  kSmlabt, kSmlad, kSmladx, kSmlal, kSmlalbb, kSmlalbt, kSmlald, kSmlaldx,
  kSmlals, kSmlaltb, kSmlaltt, kSmlatb, kSmlatt, kSmlawb, kSmlawt, kSmlsd,
  kSmlsdx, kSmlsld, kSmlsldx, kSmmla, kSmmlar, kSmmls, kSmmlsr, kSmmul,
  kSmmulr, kSmuad, kSmuadx, kSmulbb, kSmulbt, kSmull, kSmulls, kSmultb,
  kSmultt, kSmulwb, kSmulwt, kSmusd, kSmusdx, kSsat, kSsat16, kSsax, kSsub16,
  kSsub8, kStl, kStlb, kStlex, kStlexb, kStlexd, kStlexh, kStlh, kStm, kStmda,
  kStmdb, kStmea, kStmed, kStmfa, kStmfd, kStmib, kStr, kStrb, kStrd, kStrex,
  kStrexb, kStrexd, kStrexh, kStrh, kSub, kSubs, kSubw, kSvc, kSxtab,
  kSxtab16, kSxtah, kSxtb, kSxtb16, kSxth, kTbb, kTbh, kTeq, kTst, kUadd16,
  kUadd8, kUasx, kUbfx, kUdf, kUdiv, kUhadd16, kUhadd8, kUhasx, kUhsax,
  kUhsub16, kUhsub8, kUmaal, kUmlal, kUmlals, kUmull, kUmulls, kUqadd16,
  kUqadd8, kUqasx, kUqsax, kUqsub16, kUqsub8, kUsad8, kUsada8, kUsat, kUsat16,
  kUsax, kUsub16, kUsub8, kUxtab, kUxtab16, kUxtah, kUxtb, kUxtb16, kUxth,
  kVaba, kVabal, kVabd, kVabdl, kVabs, kVacge, kVacgt, kVacle, kVaclt, kVadd,
  kVaddhn, kVaddl, kVaddw, kVand, kVbic, kVbif, kVbit, kVbsl, kVceq, kVcge,
  kVcgt, kVcle, kVcls, kVclt, kVclz, kVcmp, kVcmpe, kVcnt, kVcvt, kVcvta,
  kVcvtb, kVcvtm, kVcvtn, kVcvtp, kVcvtr, kVcvtt, kVdiv, kVdup, kVeor, kVext,
  kVfma, kVfms, kVfnma, kVfnms, kVhadd, kVhsub, kVld1, kVld2, kVld3, kVld4,
  kVldm, kVldmdb, kVldmia, kVldr, kVmax, kVmaxnm, kVmin, kVminnm, kVmla,
  kVmlal, kVmls, kVmlsl, kVmov, kVmovl, kVmovn, kVmrs, kVmsr, kVmul, kVmull,
  kVmvn, kVneg, kVnmla, kVnmls, kVnmul, kVorn, kVorr, kVpadal, kVpadd,
  kVpaddl, kVpmax, kVpmin, kVpop, kVpush, kVqabs, kVqadd, kVqdmlal, kVqdmlsl,
  kVqdmulh, kVqdmull, kVqmovn, kVqmovun, kVqneg, kVqrdmulh, kVqrshl,
  kVqrshrn, kVqrshrun, kVqshl, kVqshlu, kVqshrn, kVqshrun, kVqsub, kVraddhn,
  kVrecpe, kVrecps, kVrev16, kVrev32, kVrev64, kVrhadd, kVrinta, kVrintm,
  kVrintn, kVrintp, kVrintr, kVrintx, kVrintz, kVrshl, kVrshr, kVrshrn,
  kVrsqrte, kVrsqrts, kVrsra, kVrsubhn, kVseleq, kVselge, kVselgt, kVselvs,
  kVshl, kVshll, kVshr, kVshrn, kVsli, kVsqrt, kVsra, kVsri, kVst1, kVst2,
  kVst3, kVst4, kVstm, kVstmdb, kVstmia, kVstr, kVsub, kVsubhn, kVsubl,
  kVsubw, kVswp, kVtbl, kVtbx, kVtrn, kVtst, kVuzp, kVzip, kYield
};

const char* ToCString(InstructionType type);
// End of generated code.

inline InstructionAttribute operator|(InstructionAttribute left,
                                      InstructionAttribute right) {
  return static_cast<InstructionAttribute>(static_cast<uint32_t>(left) |
                                           static_cast<uint32_t>(right));
}
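// Sketch: the overload above lets attributes combine as bit flags.
//
//   InstructionAttribute attrs = kArithmetic | kShift;  // 0x1 | 0x4 == 0x5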

}  // namespace aarch32
}  // namespace vixl

#endif  // VIXL_CONSTANTS_AARCH32_H_
67276 externals/vixl/vixl/src/aarch32/disasm-aarch32.cc vendored Normal file
File diff suppressed because it is too large.
2723 externals/vixl/vixl/src/aarch32/disasm-aarch32.h vendored Normal file
File diff suppressed because it is too large.
742 externals/vixl/vixl/src/aarch32/instructions-aarch32.cc vendored Normal file
@@ -0,0 +1,742 @@
// Copyright 2017, VIXL authors
// All rights reserved. (Standard VIXL BSD 3-clause license header, as above.)

extern "C" {
|
||||
#include <stdint.h>
|
||||
}
|
||||
|
||||
#include <cassert>
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <cstring>
|
||||
#include <iostream>
|
||||
|
||||
#include "utils-vixl.h"
|
||||
#include "aarch32/constants-aarch32.h"
|
||||
#include "aarch32/instructions-aarch32.h"
|
||||
|
||||
namespace vixl {
|
||||
namespace aarch32 {
|
||||
|
||||
|
||||
bool Shift::IsValidAmount(uint32_t amount) const {
|
||||
switch (GetType()) {
|
||||
case LSL:
|
||||
return amount <= 31;
|
||||
case ROR:
|
||||
return (amount > 0) && (amount <= 31);
|
||||
case LSR:
|
||||
case ASR:
|
||||
return (amount > 0) && (amount <= 32);
|
||||
case RRX:
|
||||
return amount == 0;
|
||||
default:
|
||||
VIXL_UNREACHABLE();
|
||||
return false;
|
||||
}
|
||||
}
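// Quick checks against the rules above (a sketch; Shift construction is
// assumed to follow the aarch32 API):
//   Shift(LSL).IsValidAmount(0)  -> true  (LSL allows 0..31)
//   Shift(LSR).IsValidAmount(32) -> true  (LSR/ASR allow 1..32)
//   Shift(ROR).IsValidAmount(0)  -> false (ROR requires 1..31)
//   Shift(RRX).IsValidAmount(0)  -> true  (RRX only encodes amount 0)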


std::ostream& operator<<(std::ostream& os, const Register reg) {
  switch (reg.GetCode()) {
    case 12:
      return os << "ip";
    case 13:
      return os << "sp";
    case 14:
      return os << "lr";
    case 15:
      return os << "pc";
    default:
      return os << "r" << reg.GetCode();
  }
}


SRegister VRegister::S() const {
  VIXL_ASSERT(GetType() == kSRegister);
  return SRegister(GetCode());
}


DRegister VRegister::D() const {
  VIXL_ASSERT(GetType() == kDRegister);
  return DRegister(GetCode());
}


QRegister VRegister::Q() const {
  VIXL_ASSERT(GetType() == kQRegister);
  return QRegister(GetCode());
}


Register RegisterList::GetFirstAvailableRegister() const {
  for (uint32_t i = 0; i < kNumberOfRegisters; i++) {
    if (((list_ >> i) & 1) != 0) return Register(i);
  }
  return Register();
}


std::ostream& PrintRegisterList(std::ostream& os,  // NOLINT(runtime/references)
                                uint32_t list) {
  os << "{";
  bool first = true;
  int code = 0;
  while (list != 0) {
    if ((list & 1) != 0) {
      if (first) {
        first = false;
      } else {
        os << ",";
      }
      os << Register(code);
    }
    list >>= 1;
    code++;
  }
  os << "}";
  return os;
}


std::ostream& operator<<(std::ostream& os, RegisterList registers) {
  return PrintRegisterList(os, registers.GetList());
}


QRegister VRegisterList::GetFirstAvailableQRegister() const {
  for (uint32_t i = 0; i < kNumberOfQRegisters; i++) {
    if (((list_ >> (i * 4)) & 0xf) == 0xf) return QRegister(i);
  }
  return QRegister();
}


DRegister VRegisterList::GetFirstAvailableDRegister() const {
  for (uint32_t i = 0; i < kMaxNumberOfDRegisters; i++) {
    if (((list_ >> (i * 2)) & 0x3) == 0x3) return DRegister(i);
  }
  return DRegister();
}


SRegister VRegisterList::GetFirstAvailableSRegister() const {
  for (uint32_t i = 0; i < kNumberOfSRegisters; i++) {
    if (((list_ >> i) & 0x1) != 0) return SRegister(i);
  }
  return SRegister();
}
|
||||
|
||||
|
||||
std::ostream& operator<<(std::ostream& os, SRegisterList reglist) {
|
||||
SRegister first = reglist.GetFirstSRegister();
|
||||
SRegister last = reglist.GetLastSRegister();
|
||||
if (first.Is(last))
|
||||
os << "{" << first << "}";
|
||||
else
|
||||
os << "{" << first << "-" << last << "}";
|
||||
return os;
|
||||
}
|
||||
|
||||
|
||||
std::ostream& operator<<(std::ostream& os, DRegisterList reglist) {
|
||||
DRegister first = reglist.GetFirstDRegister();
|
||||
DRegister last = reglist.GetLastDRegister();
|
||||
if (first.Is(last))
|
||||
os << "{" << first << "}";
|
||||
else
|
||||
os << "{" << first << "-" << last << "}";
|
||||
return os;
|
||||
}
|
||||
|
||||
std::ostream& operator<<(std::ostream& os, NeonRegisterList nreglist) {
|
||||
DRegister first = nreglist.GetFirstDRegister();
|
||||
int increment = nreglist.IsSingleSpaced() ? 1 : 2;
|
||||
int count =
|
||||
nreglist.GetLastDRegister().GetCode() - first.GetCode() + increment;
|
||||
if (count < 0) count += kMaxNumberOfDRegisters;
|
||||
os << "{";
|
||||
bool first_displayed = false;
|
||||
for (;;) {
|
||||
if (first_displayed) {
|
||||
os << ",";
|
||||
} else {
|
||||
first_displayed = true;
|
||||
}
|
||||
os << first;
|
||||
if (nreglist.IsTransferOneLane()) {
|
||||
os << "[" << nreglist.GetTransferLane() << "]";
|
||||
} else if (nreglist.IsTransferAllLanes()) {
|
||||
os << "[]";
|
||||
}
|
||||
count -= increment;
|
||||
if (count <= 0) break;
|
||||
unsigned next = first.GetCode() + increment;
|
||||
if (next >= kMaxNumberOfDRegisters) next -= kMaxNumberOfDRegisters;
|
||||
first = DRegister(next);
|
||||
}
|
||||
os << "}";
|
||||
return os;
|
||||
}
|
||||
|
||||
|
||||
const char* SpecialRegister::GetName() const {
|
||||
switch (reg_) {
|
||||
case APSR:
|
||||
return "APSR";
|
||||
case SPSR:
|
||||
return "SPSR";
|
||||
}
|
||||
VIXL_UNREACHABLE();
|
||||
return "??";
|
||||
}
|
||||
|
||||
|
||||
const char* MaskedSpecialRegister::GetName() const {
|
||||
switch (reg_) {
|
||||
case APSR_nzcvq:
|
||||
return "APSR_nzcvq";
|
||||
case APSR_g:
|
||||
return "APSR_g";
|
||||
case APSR_nzcvqg:
|
||||
return "APSR_nzcvqg";
|
||||
case CPSR_c:
|
||||
return "CPSR_c";
|
||||
case CPSR_x:
|
||||
return "CPSR_x";
|
||||
case CPSR_xc:
|
||||
return "CPSR_xc";
|
||||
case CPSR_sc:
|
||||
return "CPSR_sc";
|
||||
case CPSR_sx:
|
||||
return "CPSR_sx";
|
||||
case CPSR_sxc:
|
||||
return "CPSR_sxc";
|
||||
case CPSR_fc:
|
||||
return "CPSR_fc";
|
||||
case CPSR_fx:
|
||||
return "CPSR_fx";
|
||||
case CPSR_fxc:
|
||||
return "CPSR_fxc";
|
||||
case CPSR_fsc:
|
||||
return "CPSR_fsc";
|
||||
case CPSR_fsx:
|
||||
return "CPSR_fsx";
|
||||
case CPSR_fsxc:
|
||||
return "CPSR_fsxc";
|
||||
case SPSR_c:
|
||||
return "SPSR_c";
|
||||
case SPSR_x:
|
||||
return "SPSR_x";
|
||||
case SPSR_xc:
|
||||
return "SPSR_xc";
|
||||
case SPSR_s:
|
||||
return "SPSR_s";
|
||||
case SPSR_sc:
|
||||
return "SPSR_sc";
|
||||
case SPSR_sx:
|
||||
return "SPSR_sx";
|
||||
case SPSR_sxc:
|
||||
return "SPSR_sxc";
|
||||
case SPSR_f:
|
||||
return "SPSR_f";
|
||||
case SPSR_fc:
|
||||
return "SPSR_fc";
|
||||
case SPSR_fx:
|
||||
return "SPSR_fx";
|
||||
case SPSR_fxc:
|
||||
return "SPSR_fxc";
|
||||
case SPSR_fs:
|
||||
return "SPSR_fs";
|
||||
case SPSR_fsc:
|
||||
return "SPSR_fsc";
|
||||
case SPSR_fsx:
|
||||
return "SPSR_fsx";
|
||||
case SPSR_fsxc:
|
||||
return "SPSR_fsxc";
|
||||
}
|
||||
VIXL_UNREACHABLE();
|
||||
return "??";
|
||||
}
|
||||
|
||||
|
||||
const char* BankedRegister::GetName() const {
|
||||
switch (reg_) {
|
||||
case R8_usr:
|
||||
return "R8_usr";
|
||||
case R9_usr:
|
||||
return "R9_usr";
|
||||
case R10_usr:
|
||||
return "R10_usr";
|
||||
case R11_usr:
|
||||
return "R11_usr";
|
||||
case R12_usr:
|
||||
return "R12_usr";
|
||||
case SP_usr:
|
||||
return "SP_usr";
|
||||
case LR_usr:
|
||||
return "LR_usr";
|
||||
case R8_fiq:
|
||||
return "R8_fiq";
|
||||
case R9_fiq:
|
||||
return "R9_fiq";
|
||||
case R10_fiq:
|
||||
return "R10_fiq";
|
||||
case R11_fiq:
|
||||
return "R11_fiq";
|
||||
case R12_fiq:
|
||||
return "R12_fiq";
|
||||
case SP_fiq:
|
||||
return "SP_fiq";
|
||||
case LR_fiq:
|
||||
return "LR_fiq";
|
||||
case LR_irq:
|
||||
return "LR_irq";
|
||||
case SP_irq:
|
||||
return "SP_irq";
|
||||
case LR_svc:
|
||||
return "LR_svc";
|
||||
case SP_svc:
|
||||
return "SP_svc";
|
||||
case LR_abt:
|
||||
return "LR_abt";
|
||||
case SP_abt:
|
||||
return "SP_abt";
|
||||
case LR_und:
|
||||
return "LR_und";
|
||||
case SP_und:
|
||||
return "SP_und";
|
||||
case LR_mon:
|
||||
return "LR_mon";
|
||||
case SP_mon:
|
||||
return "SP_mon";
|
||||
case ELR_hyp:
|
||||
return "ELR_hyp";
|
||||
case SP_hyp:
|
||||
return "SP_hyp";
|
||||
case SPSR_fiq:
|
||||
return "SPSR_fiq";
|
||||
case SPSR_irq:
|
||||
return "SPSR_irq";
|
||||
case SPSR_svc:
|
||||
return "SPSR_svc";
|
||||
case SPSR_abt:
|
||||
return "SPSR_abt";
|
||||
case SPSR_und:
|
||||
return "SPSR_und";
|
||||
case SPSR_mon:
|
||||
return "SPSR_mon";
|
||||
case SPSR_hyp:
|
||||
return "SPSR_hyp";
|
||||
}
|
||||
VIXL_UNREACHABLE();
|
||||
return "??";
|
||||
}
|
||||
|
||||
const char* SpecialFPRegister::GetName() const {
|
||||
switch (reg_) {
|
||||
case FPSID:
|
||||
return "FPSID";
|
||||
case FPSCR:
|
||||
return "FPSCR";
|
||||
case MVFR2:
|
||||
return "MVFR2";
|
||||
case MVFR1:
|
||||
return "MVFR1";
|
||||
case MVFR0:
|
||||
return "MVFR0";
|
||||
case FPEXC:
|
||||
return "FPEXC";
|
||||
}
|
||||
VIXL_UNREACHABLE();
|
||||
return "??";
|
||||
}
|
||||
|
||||
|
||||
const char* Condition::GetName() const {
|
||||
switch (condition_) {
|
||||
case eq:
|
||||
return "eq";
|
||||
case ne:
|
||||
return "ne";
|
||||
case cs:
|
||||
return "cs";
|
||||
case cc:
|
||||
return "cc";
|
||||
case mi:
|
||||
return "mi";
|
||||
case pl:
|
||||
return "pl";
|
||||
case vs:
|
||||
return "vs";
|
||||
case vc:
|
||||
return "vc";
|
||||
case hi:
|
||||
return "hi";
|
||||
case ls:
|
||||
return "ls";
|
||||
case ge:
|
||||
return "ge";
|
||||
case lt:
|
||||
return "lt";
|
||||
case gt:
|
||||
return "gt";
|
||||
case le:
|
||||
return "le";
|
||||
case al:
|
||||
return "";
|
||||
case Condition::kNone:
|
||||
return "";
|
||||
}
|
||||
return "<und>";
|
||||
}
|
||||
|
||||
|
||||
const char* Shift::GetName() const {
|
||||
switch (shift_) {
|
||||
case LSL:
|
||||
return "lsl";
|
||||
case LSR:
|
||||
return "lsr";
|
||||
case ASR:
|
||||
return "asr";
|
||||
case ROR:
|
||||
return "ror";
|
||||
case RRX:
|
||||
return "rrx";
|
||||
}
|
||||
VIXL_UNREACHABLE();
|
||||
return "??";
|
||||
}
|
||||
|
||||
|
||||
const char* EncodingSize::GetName() const {
|
||||
switch (size_) {
|
||||
case Best:
|
||||
case Narrow:
|
||||
return "";
|
||||
case Wide:
|
||||
return ".w";
|
||||
}
|
||||
VIXL_UNREACHABLE();
|
||||
return "??";
|
||||
}
|
||||
|
||||
|
||||
const char* DataType::GetName() const {
|
||||
switch (value_) {
|
||||
case kDataTypeValueInvalid:
|
||||
return ".??";
|
||||
case kDataTypeValueNone:
|
||||
return "";
|
||||
case S8:
|
||||
return ".s8";
|
||||
case S16:
|
||||
return ".s16";
|
||||
case S32:
|
||||
return ".s32";
|
||||
case S64:
|
||||
return ".s64";
|
||||
case U8:
|
||||
return ".u8";
|
||||
case U16:
|
||||
return ".u16";
|
||||
case U32:
|
||||
return ".u32";
|
||||
case U64:
|
||||
return ".u64";
|
||||
case F16:
|
||||
return ".f16";
|
||||
case F32:
|
||||
return ".f32";
|
||||
case F64:
|
||||
return ".f64";
|
||||
case I8:
|
||||
return ".i8";
|
||||
case I16:
|
||||
return ".i16";
|
||||
case I32:
|
||||
return ".i32";
|
||||
case I64:
|
||||
return ".i64";
|
||||
case P8:
|
||||
return ".p8";
|
||||
case P64:
|
||||
return ".p64";
|
||||
case Untyped8:
|
||||
return ".8";
|
||||
case Untyped16:
|
||||
return ".16";
|
||||
case Untyped32:
|
||||
return ".32";
|
||||
case Untyped64:
|
||||
return ".64";
|
||||
}
|
||||
VIXL_UNREACHABLE();
|
||||
return ".??";
|
||||
}
|
||||
|
||||
|
||||
const char* MemoryBarrier::GetName() const {
|
||||
switch (type_) {
|
||||
case OSHLD:
|
||||
return "oshld";
|
||||
case OSHST:
|
||||
return "oshst";
|
||||
case OSH:
|
||||
return "osh";
|
||||
case NSHLD:
|
||||
return "nshld";
|
||||
case NSHST:
|
||||
return "nshst";
|
||||
case NSH:
|
||||
return "nsh";
|
||||
case ISHLD:
|
||||
return "ishld";
|
||||
case ISHST:
|
||||
return "ishst";
|
||||
case ISH:
|
||||
return "ish";
|
||||
case LD:
|
||||
return "ld";
|
||||
case ST:
|
||||
return "st";
|
||||
case SY:
|
||||
return "sy";
|
||||
}
|
||||
switch (static_cast<int>(type_)) {
|
||||
case 0:
|
||||
return "#0x0";
|
||||
case 4:
|
||||
return "#0x4";
|
||||
case 8:
|
||||
return "#0x8";
|
||||
case 0xc:
|
||||
return "#0xc";
|
||||
}
|
||||
VIXL_UNREACHABLE();
|
||||
return "??";
|
||||
}
|
||||
|
||||
|
||||
const char* InterruptFlags::GetName() const {
|
||||
switch (type_) {
|
||||
case F:
|
||||
return "f";
|
||||
case I:
|
||||
return "i";
|
||||
case IF:
|
||||
return "if";
|
||||
case A:
|
||||
return "a";
|
||||
case AF:
|
||||
return "af";
|
||||
case AI:
|
||||
return "ai";
|
||||
case AIF:
|
||||
return "aif";
|
||||
}
|
||||
VIXL_ASSERT(type_ == 0);
|
||||
return "";
|
||||
}
|
||||
|
||||
|
||||
const char* Endianness::GetName() const {
|
||||
switch (type_) {
|
||||
case LE:
|
||||
return "le";
|
||||
case BE:
|
||||
return "be";
|
||||
}
|
||||
VIXL_UNREACHABLE();
|
||||
return "??";
|
||||
}
|
||||
|
||||
|
||||
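// In the A32/T32 shift encodings, an immediate amount field of 0 means a
// shift of 32 for LSR and ASR, and "ror #0" denotes RRX; the constructor
// below maps the raw fields back to that convention.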
// Constructor used for disassembly.
ImmediateShiftOperand::ImmediateShiftOperand(int shift_value, int amount_value)
    : Shift(shift_value) {
  switch (shift_value) {
    case LSL:
      amount_ = amount_value;
      break;
    case LSR:
    case ASR:
      amount_ = (amount_value == 0) ? 32 : amount_value;
      break;
    case ROR:
      amount_ = amount_value;
      if (amount_value == 0) SetType(RRX);
      break;
    default:
      VIXL_UNREACHABLE();
      SetType(LSL);
      amount_ = 0;
      break;
  }
}


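// T32 modified immediates encode into 12 bits: patterns 0x0-0x3 select the
// replicated byte forms matched below, otherwise the value is an 8-bit
// payload with an implied top bit, rotated into place. Worked example
// (illustrative): 0x2a000000 yields imm8 = 0xa8 at shift = 2, and is encoded
// as ((2 + 8) << 7) | 0x28 = 0x528; Decode(0x528) restores 0xa8 << 22.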
ImmediateT32::ImmediateT32(uint32_t imm) {
  // 00000000 00000000 00000000 abcdefgh
  if ((imm & ~0xff) == 0) {
    SetEncodingValue(imm);
    return;
  }
  if ((imm >> 16) == (imm & 0xffff)) {
    if ((imm & 0xff00) == 0) {
      // 00000000 abcdefgh 00000000 abcdefgh
      SetEncodingValue((imm & 0xff) | (0x1 << 8));
      return;
    }
    if ((imm & 0xff) == 0) {
      // abcdefgh 00000000 abcdefgh 00000000
      SetEncodingValue(((imm >> 8) & 0xff) | (0x2 << 8));
      return;
    }
    if (((imm >> 8) & 0xff) == (imm & 0xff)) {
      // abcdefgh abcdefgh abcdefgh abcdefgh
      SetEncodingValue((imm & 0xff) | (0x3 << 8));
      return;
    }
  }
  for (int shift = 0; shift < 24; shift++) {
    uint32_t imm8 = imm >> (24 - shift);
    uint32_t overflow = imm << (8 + shift);
    if ((imm8 <= 0xff) && ((imm8 & 0x80) != 0) && (overflow == 0)) {
      SetEncodingValue(((shift + 8) << 7) | (imm8 & 0x7F));
      return;
    }
  }
}


static inline uint32_t ror(uint32_t x, int i) {
  VIXL_ASSERT((0 < i) && (i < 32));
  return (x >> i) | (x << (32 - i));
}


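// Once the replicated-byte patterns are excluded, a value is a T32 modified
// immediate iff all of its set bits fit within one byte, i.e. imm < lsb * 256.
// Worked example (illustrative): imm = 0x0003fc00 has lsb = 0x400 and
// imm >> 8 = 0x3fc < 0x400, so it is encodable.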
bool ImmediateT32::IsImmediateT32(uint32_t imm) {
  /* abcdefgh abcdefgh abcdefgh abcdefgh */
  if ((imm ^ ror(imm, 8)) == 0) return true;
  /* 00000000 abcdefgh 00000000 abcdefgh */
  /* abcdefgh 00000000 abcdefgh 00000000 */
  if ((imm ^ ror(imm, 16)) == 0 &&
      (((imm & 0xff00) == 0) || ((imm & 0xff) == 0)))
    return true;
  /* isolate least-significant set bit */
  uint32_t lsb = imm & -imm;
  /* if imm is less than lsb*256 then it fits, but instead we test imm/256 to
   * avoid overflow (underflow is always a successful case) */
  return ((imm >> 8) < lsb);
}


uint32_t ImmediateT32::Decode(uint32_t value) {
  uint32_t base = value & 0xff;
  switch (value >> 8) {
    case 0:
      return base;
    case 1:
      return base | (base << 16);
    case 2:
      return (base << 8) | (base << 24);
    case 3:
      return base | (base << 8) | (base << 16) | (base << 24);
    default:
      base |= 0x80;
      return base << (32 - (value >> 7));
  }
}


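// A32 modified immediates are an 8-bit value rotated right by an even
// amount. The loop searches in encoded form: rotating imm left by rot finds
// the imm8 which, rotated right by rot, reproduces imm. Worked example
// (illustrative): 0xff000000 gives imm8 = 0xff at rot = 8, encoded as
// (8 << 7) | 0xff, and Decode() applies 0xff ror 8 to recover it.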
ImmediateA32::ImmediateA32(uint32_t imm) {
  // Deal with rot = 0 first to avoid undefined shift by 32.
  if (imm <= 0xff) {
    SetEncodingValue(imm);
    return;
  }
  for (int rot = 2; rot < 32; rot += 2) {
    uint32_t imm8 = (imm << rot) | (imm >> (32 - rot));
    if (imm8 <= 0xff) {
      SetEncodingValue((rot << 7) | imm8);
      return;
    }
  }
}


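// A32 rotations are even, so odd set bits are first folded onto the even
// bits below them; the lsb test then matches the T32 one. Worked example
// (illustrative): imm = 0x00000ff0 gives lsb = 0x10 and imm >> 8 = 0xf,
// which is below lsb, so the value is encodable (0xff ror 28).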
bool ImmediateA32::IsImmediateA32(uint32_t imm) {
  /* fast-out */
  if (imm < 256) return true;
  /* avoid getting confused by wrapped-around bytes (this transform has no
   * effect on pass/fail results) */
  if (imm & 0xff000000) imm = ror(imm, 16);
  /* copy odd-numbered set bits into even-numbered bits immediately below, so
   * that the least-significant set bit is always an even bit */
  imm = imm | ((imm >> 1) & 0x55555555);
  /* isolate least-significant set bit (always even) */
  uint32_t lsb = imm & -imm;
  /* if imm is less than lsb*256 then it fits, but instead we test imm/256 to
   * avoid overflow (underflow is always a successful case) */
  return ((imm >> 8) < lsb);
}


uint32_t ImmediateA32::Decode(uint32_t value) {
  int rotation = (value >> 8) * 2;
  VIXL_ASSERT(rotation >= 0);
  VIXL_ASSERT(rotation <= 30);
  value &= 0xff;
  if (rotation == 0) return value;
  return (value >> rotation) | (value << (32 - rotation));
}


uint32_t TypeEncodingValue(Shift shift) {
  return shift.IsRRX() ? kRRXEncodedValue : shift.GetValue();
}


uint32_t AmountEncodingValue(Shift shift, uint32_t amount) {
  switch (shift.GetType()) {
    case LSL:
    case ROR:
      return amount;
    case LSR:
    case ASR:
      return amount % 32;
    case RRX:
      return 0;
  }
  return 0;
}

} // namespace aarch32
} // namespace vixl
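The A32 immediate predicate above can be cross-checked against a brute-force
rotation search; a minimal standalone harness (hypothetical, not part of this
commit) along those lines:

#include <cstdint>
#include <cstdio>

// Reference check: imm is an A32 modified immediate iff some even rotation
// of an 8-bit value reproduces it.
static bool IsImmediateA32Reference(uint32_t imm) {
  for (int rot = 0; rot < 32; rot += 2) {
    for (uint32_t imm8 = 0; imm8 <= 0xff; imm8++) {
      // Rotate imm8 right by rot, guarding rot == 0 to avoid a shift by 32.
      uint32_t rotated =
          (rot == 0) ? imm8 : ((imm8 >> rot) | (imm8 << (32 - rot)));
      if (rotated == imm) return true;
    }
  }
  return false;
}

int main() {
  // Spot-check a few values against the expected results.
  printf("%d\n", IsImmediateA32Reference(0x000000ff));  // 1
  printf("%d\n", IsImmediateA32Reference(0xff000000));  // 1
  printf("%d\n", IsImmediateA32Reference(0x000001fe));  // 0 (odd rotation)
  return 0;
}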
1359
externals/vixl/vixl/src/aarch32/instructions-aarch32.h
vendored
Normal file
File diff suppressed because it is too large
152
externals/vixl/vixl/src/aarch32/location-aarch32.cc
vendored
Normal file
@@ -0,0 +1,152 @@
// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "location-aarch32.h"

#include "assembler-aarch32.h"
#include "macro-assembler-aarch32.h"

namespace vixl {

namespace aarch32 {

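// A forward reference constrains the minimum location at which its target
// can be bound. In T32 code that minimum can exceed the current location by
// 2 bytes, in which case 16 bits of padding must be emitted before binding.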
bool Location::Needs16BitPadding(int32_t location) const {
  if (!HasForwardReferences()) return false;
  const ForwardRef& last_ref = GetLastForwardReference();
  int32_t min_location_last_ref = last_ref.GetMinLocation();
  VIXL_ASSERT(min_location_last_ref - location <= 2);
  return (min_location_last_ref > location);
}

void Location::ResolveReferences(internal::AssemblerBase* assembler) {
  // Iterate over references and call EncodeLocationFor on each of them.
  for (ForwardRefListIterator it(this); !it.Done(); it.Advance()) {
    const ForwardRef& reference = *it.Current();
    VIXL_ASSERT(reference.LocationIsEncodable(location_));
    int32_t from = reference.GetLocation();
    EncodeLocationFor(assembler, from, reference.op());
  }
  forward_.clear();
}

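// 32-bit T32 instructions have bits [15:11] of their first halfword equal to
// 0b11101, 0b11110 or 0b11111, so the first halfword of a 32-bit encoding is
// at least 0xe800 (kLowestT32_32Opcode >> 16).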
static bool Is16BitEncoding(uint16_t instr) {
  return instr < (kLowestT32_32Opcode >> 16);
}

void Location::EncodeLocationFor(internal::AssemblerBase* assembler,
                                 int32_t from,
                                 const Location::EmitOperator* encoder) {
  if (encoder->IsUsingT32()) {
    uint16_t* instr_ptr =
        assembler->GetBuffer()->GetOffsetAddress<uint16_t*>(from);
    if (Is16BitEncoding(instr_ptr[0])) {
      // The Encode method always deals with uint32_t types, so we need
      // to explicitly cast it.
      uint32_t instr = static_cast<uint32_t>(instr_ptr[0]);
      instr = encoder->Encode(instr, from, this);
      // The Encode method should not ever set the top 16 bits.
      VIXL_ASSERT((instr & ~0xffff) == 0);
      instr_ptr[0] = static_cast<uint16_t>(instr);
    } else {
      uint32_t instr =
          instr_ptr[1] | (static_cast<uint32_t>(instr_ptr[0]) << 16);
      instr = encoder->Encode(instr, from, this);
      instr_ptr[0] = static_cast<uint16_t>(instr >> 16);
      instr_ptr[1] = static_cast<uint16_t>(instr);
    }
  } else {
    uint32_t* instr_ptr =
        assembler->GetBuffer()->GetOffsetAddress<uint32_t*>(from);
    instr_ptr[0] = encoder->Encode(instr_ptr[0], from, this);
  }
}
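// Reachable ranges are computed from the PC value the instruction observes:
// in T32 the PC reads as the instruction address plus 4, in A32 plus 8
// (kT32PcDelta and kA32PcDelta), and some instructions additionally align
// that PC down to a word boundary.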
void Location::AddForwardRef(int32_t instr_location,
                             const EmitOperator& op,
                             const ReferenceInfo* info) {
  VIXL_ASSERT(referenced_);
  int32_t from = instr_location + (op.IsUsingT32() ? kT32PcDelta : kA32PcDelta);
  if (info->pc_needs_aligning == ReferenceInfo::kAlignPc)
    from = AlignDown(from, 4);
  int32_t min_object_location = from + info->min_offset;
  int32_t max_object_location = from + info->max_offset;
  forward_.insert(ForwardRef(&op,
                             instr_location,
                             info->size,
                             min_object_location,
                             max_object_location,
                             info->alignment));
}

int Location::GetMaxAlignment() const {
  int max_alignment = GetPoolObjectAlignment();
  for (ForwardRefListIterator it(const_cast<Location*>(this)); !it.Done();
       it.Advance()) {
    const ForwardRef& reference = *it.Current();
    if (reference.GetAlignment() > max_alignment)
      max_alignment = reference.GetAlignment();
  }
  return max_alignment;
}

int Location::GetMinLocation() const {
  int32_t min_location = 0;
  for (ForwardRefListIterator it(const_cast<Location*>(this)); !it.Done();
       it.Advance()) {
    const ForwardRef& reference = *it.Current();
    if (reference.GetMinLocation() > min_location)
      min_location = reference.GetMinLocation();
  }
  return min_location;
}

void Label::UpdatePoolObject(PoolObject<int32_t>* object) {
  VIXL_ASSERT(forward_.size() == 1);
  const ForwardRef& reference = forward_.Front();
  object->Update(reference.GetMinLocation(),
                 reference.GetMaxLocation(),
                 reference.GetAlignment());
}

void Label::EmitPoolObject(MacroAssemblerInterface* masm) {
  MacroAssembler* macro_assembler = static_cast<MacroAssembler*>(masm);

  // Add a new branch to this label.
  macro_assembler->GetBuffer()->EnsureSpaceFor(kMaxInstructionSizeInBytes);
  ExactAssemblyScopeWithoutPoolsCheck guard(macro_assembler,
                                            kMaxInstructionSizeInBytes,
                                            ExactAssemblyScope::kMaximumSize);
  macro_assembler->b(this);
}

void RawLiteral::EmitPoolObject(MacroAssemblerInterface* masm) {
  Assembler* assembler = static_cast<Assembler*>(masm->AsAssemblerBase());

  assembler->GetBuffer()->EnsureSpaceFor(GetSize());
  assembler->GetBuffer()->EmitData(GetDataAddress(), GetSize());
}
} // namespace aarch32
} // namespace vixl
411
externals/vixl/vixl/src/aarch32/location-aarch32.h
vendored
Normal file
@@ -0,0 +1,411 @@
// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of ARM Limited nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_AARCH32_LABEL_AARCH32_H_
#define VIXL_AARCH32_LABEL_AARCH32_H_

extern "C" {
#include <stdint.h>
}

#include <algorithm>
#include <cstddef>
#include <iomanip>
#include <list>

#include "invalset-vixl.h"
#include "pool-manager.h"
#include "utils-vixl.h"

#include "constants-aarch32.h"
#include "instructions-aarch32.h"

namespace vixl {

namespace aarch32 {

class MacroAssembler;

class Location : public LocationBase<int32_t> {
  friend class Assembler;
  friend class MacroAssembler;

 public:
  // Unbound location that can be used with the assembler bind() method and
  // with the assembler methods for generating instructions, but will never
  // be handled by the pool manager.
  Location()
      : LocationBase<int32_t>(kRawLocation, 1 /* placeholder size*/),
        referenced_(false) {}

  typedef int32_t Offset;

  ~Location() VIXL_NEGATIVE_TESTING_ALLOW_EXCEPTION {
#ifdef VIXL_DEBUG
    if (IsReferenced() && !IsBound()) {
      VIXL_ABORT_WITH_MSG("Location, label or literal used but not bound.\n");
    }
#endif
  }

  bool IsReferenced() const { return referenced_; }

 private:
  class EmitOperator {
   public:
    explicit EmitOperator(InstructionSet isa) : isa_(isa) {
#if defined(VIXL_INCLUDE_TARGET_A32_ONLY)
      USE(isa_);
      VIXL_ASSERT(isa == A32);
#elif defined(VIXL_INCLUDE_TARGET_T32_ONLY)
      USE(isa_);
      VIXL_ASSERT(isa == T32);
#endif
    }
    virtual ~EmitOperator() {}
    virtual uint32_t Encode(uint32_t /*instr*/,
                            Location::Offset /*pc*/,
                            const Location* /*label*/) const {
      return 0;
    }
#if defined(VIXL_INCLUDE_TARGET_A32_ONLY)
    bool IsUsingT32() const { return false; }
#elif defined(VIXL_INCLUDE_TARGET_T32_ONLY)
    bool IsUsingT32() const { return true; }
#else
    bool IsUsingT32() const { return isa_ == T32; }
#endif

   private:
    InstructionSet isa_;
  };

 protected:
  class ForwardRef : public ForwardReference<int32_t> {
   public:
    // Default constructor for InvalSet.
    ForwardRef() : ForwardReference<int32_t>(0, 0, 0, 0, 1), op_(NULL) {}

    ForwardRef(const Location::EmitOperator* op,
               int32_t location,
               int size,
               int32_t min_object_location,
               int32_t max_object_location,
               int object_alignment = 1)
        : ForwardReference<int32_t>(location,
                                    size,
                                    min_object_location,
                                    max_object_location,
                                    object_alignment),
          op_(op) {}

    const Location::EmitOperator* op() const { return op_; }

    // We must provide comparison operators to work with InvalSet.
    bool operator==(const ForwardRef& other) const {
      return GetLocation() == other.GetLocation();
    }
    bool operator<(const ForwardRef& other) const {
      return GetLocation() < other.GetLocation();
    }
    bool operator<=(const ForwardRef& other) const {
      return GetLocation() <= other.GetLocation();
    }
    bool operator>(const ForwardRef& other) const {
      return GetLocation() > other.GetLocation();
    }

   private:
    const Location::EmitOperator* op_;
  };

  static const int kNPreallocatedElements = 4;
  // The following parameters will not affect ForwardRefList in practice, as we
  // resolve all references at once and clear the list, so we do not need to
  // remove individual elements by invalidating them.
  static const int32_t kInvalidLinkKey = INT32_MAX;
  static const size_t kReclaimFrom = 512;
  static const size_t kReclaimFactor = 2;

  typedef InvalSet<ForwardRef,
                   kNPreallocatedElements,
                   int32_t,
                   kInvalidLinkKey,
                   kReclaimFrom,
                   kReclaimFactor>
      ForwardRefListBase;
  typedef InvalSetIterator<ForwardRefListBase> ForwardRefListIteratorBase;

  class ForwardRefList : public ForwardRefListBase {
   public:
    ForwardRefList() : ForwardRefListBase() {}

    using ForwardRefListBase::Back;
    using ForwardRefListBase::Front;
  };

  class ForwardRefListIterator : public ForwardRefListIteratorBase {
   public:
    explicit ForwardRefListIterator(Location* location)
        : ForwardRefListIteratorBase(&location->forward_) {}

    // TODO: Remove these and use the STL-like interface instead. We'll need a
    // const_iterator implemented for this.
    using ForwardRefListIteratorBase::Advance;
    using ForwardRefListIteratorBase::Current;
  };

  // For InvalSet::GetKey() and InvalSet::SetKey().
  friend class InvalSet<ForwardRef,
                        kNPreallocatedElements,
                        int32_t,
                        kInvalidLinkKey,
                        kReclaimFrom,
                        kReclaimFactor>;

 private:
  virtual void ResolveReferences(internal::AssemblerBase* assembler)
      VIXL_OVERRIDE;

  void SetReferenced() { referenced_ = true; }

  bool HasForwardReferences() const { return !forward_.empty(); }

  ForwardRef GetLastForwardReference() const {
    VIXL_ASSERT(HasForwardReferences());
    return forward_.Back();
  }

  // Add forward reference to this object. Called from the assembler.
  void AddForwardRef(int32_t instr_location,
                     const EmitOperator& op,
                     const ReferenceInfo* info);

  // Check if we need to add padding when binding this object, in order to
  // meet the minimum location requirement.
  bool Needs16BitPadding(int location) const;

  void EncodeLocationFor(internal::AssemblerBase* assembler,
                         int32_t from,
                         const Location::EmitOperator* encoder);

  // True if the label has been used at least once.
  bool referenced_;

 protected:
  // Types passed to LocationBase. Must be distinct for unbound Locations (not
  // relevant for bound locations, as they don't have a corresponding
  // PoolObject).
  static const int kRawLocation = 0;  // Will not be used by the pool manager.
  static const int kVeneerType = 1;
  static const int kLiteralType = 2;

  // Contains the references to the unbound label
  ForwardRefList forward_;

  // To be used only by derived classes.
  Location(uint32_t type, int size, int alignment)
      : LocationBase<int32_t>(type, size, alignment), referenced_(false) {}

  // To be used only by derived classes.
  explicit Location(Offset location)
      : LocationBase<int32_t>(location), referenced_(false) {}

  virtual int GetMaxAlignment() const VIXL_OVERRIDE;
  virtual int GetMinLocation() const VIXL_OVERRIDE;

 private:
  // Included to make the class concrete, however should never be called.
  virtual void EmitPoolObject(MacroAssemblerInterface* masm) VIXL_OVERRIDE {
    USE(masm);
    VIXL_UNREACHABLE();
  }
};

class Label : public Location {
  static const int kVeneerSize = 4;
  // Use an alignment of 1 for all architectures. Even though we can bind an
  // unused label, because of the way the MacroAssembler works we can always be
  // sure to have the correct buffer alignment for the instruction set we are
  // using, so we do not need to enforce additional alignment requirements
  // here.
  // TODO: Consider modifying the interface of the pool manager to pass an
  // optional additional alignment to Bind() in order to handle cases where the
  // buffer could be unaligned.
  static const int kVeneerAlignment = 1;

 public:
  Label() : Location(kVeneerType, kVeneerSize, kVeneerAlignment) {}
  explicit Label(Offset location) : Location(location) {}

 private:
  virtual bool ShouldBeDeletedOnPlacementByPoolManager() const VIXL_OVERRIDE {
    return false;
  }
  virtual bool ShouldDeletePoolObjectOnPlacement() const VIXL_OVERRIDE {
    return false;
  }

  virtual void UpdatePoolObject(PoolObject<int32_t>* object) VIXL_OVERRIDE;
  virtual void EmitPoolObject(MacroAssemblerInterface* masm) VIXL_OVERRIDE;

  virtual bool UsePoolObjectEmissionMargin() const VIXL_OVERRIDE {
    return true;
  }
  virtual int32_t GetPoolObjectEmissionMargin() const VIXL_OVERRIDE {
    VIXL_ASSERT(UsePoolObjectEmissionMargin() == true);
    return 1 * KBytes;
  }
};

class RawLiteral : public Location {
  // Some load instructions require alignment to 4 bytes. Since we do
  // not know what instructions will reference a literal after we place
  // it, we enforce a 4 byte alignment for literals that are 4 bytes or
  // larger.
  static const int kLiteralAlignment = 4;

 public:
  enum PlacementPolicy { kPlacedWhenUsed, kManuallyPlaced };

  enum DeletionPolicy {
    kDeletedOnPlacementByPool,
    kDeletedOnPoolDestruction,
    kManuallyDeleted
  };

  RawLiteral(const void* addr,
             int size,
             PlacementPolicy placement_policy = kPlacedWhenUsed,
             DeletionPolicy deletion_policy = kManuallyDeleted)
      : Location(kLiteralType,
                 size,
                 (size < kLiteralAlignment) ? size : kLiteralAlignment),
        addr_(addr),
        manually_placed_(placement_policy == kManuallyPlaced),
        deletion_policy_(deletion_policy) {
    // We can't have manually placed literals that are not manually deleted.
    VIXL_ASSERT(!IsManuallyPlaced() ||
                (GetDeletionPolicy() == kManuallyDeleted));
  }
  RawLiteral(const void* addr, int size, DeletionPolicy deletion_policy)
      : Location(kLiteralType,
                 size,
                 (size < kLiteralAlignment) ? size : kLiteralAlignment),
        addr_(addr),
        manually_placed_(false),
        deletion_policy_(deletion_policy) {}
  const void* GetDataAddress() const { return addr_; }
  int GetSize() const { return GetPoolObjectSizeInBytes(); }

  bool IsManuallyPlaced() const { return manually_placed_; }

 private:
  DeletionPolicy GetDeletionPolicy() const { return deletion_policy_; }

  virtual bool ShouldBeDeletedOnPlacementByPoolManager() const VIXL_OVERRIDE {
    return GetDeletionPolicy() == kDeletedOnPlacementByPool;
  }
  virtual bool ShouldBeDeletedOnPoolManagerDestruction() const VIXL_OVERRIDE {
    return GetDeletionPolicy() == kDeletedOnPoolDestruction;
  }
  virtual void EmitPoolObject(MacroAssemblerInterface* masm) VIXL_OVERRIDE;

  // Data address before it's moved into the code buffer.
  const void* const addr_;
  // When this flag is true, the literal will be placed manually.
  bool manually_placed_;
  // Specifies when the literal is to be removed from memory. It can be
  // deleted when:
  //   it is moved into the code buffer: kDeletedOnPlacementByPool
  //   the pool is deleted: kDeletedOnPoolDestruction
  //   or it is left to the application: kManuallyDeleted.
  DeletionPolicy deletion_policy_;

  friend class MacroAssembler;
};

template <typename T>
class Literal : public RawLiteral {
 public:
  explicit Literal(const T& value,
                   PlacementPolicy placement_policy = kPlacedWhenUsed,
                   DeletionPolicy deletion_policy = kManuallyDeleted)
      : RawLiteral(&value_, sizeof(T), placement_policy, deletion_policy),
        value_(value) {}
  explicit Literal(const T& value, DeletionPolicy deletion_policy)
      : RawLiteral(&value_, sizeof(T), deletion_policy), value_(value) {}
  void UpdateValue(const T& value, CodeBuffer* buffer) {
    value_ = value;
    if (IsBound()) {
      buffer->UpdateData(GetLocation(), GetDataAddress(), GetSize());
    }
  }

 private:
  T value_;
};

class StringLiteral : public RawLiteral {
 public:
  explicit StringLiteral(const char* str,
                         PlacementPolicy placement_policy = kPlacedWhenUsed,
                         DeletionPolicy deletion_policy = kManuallyDeleted)
      : RawLiteral(str,
                   static_cast<int>(strlen(str) + 1),
                   placement_policy,
                   deletion_policy) {
    VIXL_ASSERT((strlen(str) + 1) <= kMaxObjectSize);
  }
  explicit StringLiteral(const char* str, DeletionPolicy deletion_policy)
      : RawLiteral(str, static_cast<int>(strlen(str) + 1), deletion_policy) {
    VIXL_ASSERT((strlen(str) + 1) <= kMaxObjectSize);
  }
};

} // namespace aarch32


// Required InvalSet template specialisations.
#define INVAL_SET_TEMPLATE_PARAMETERS \
  aarch32::Location::ForwardRef, aarch32::Location::kNPreallocatedElements, \
      int32_t, aarch32::Location::kInvalidLinkKey, \
      aarch32::Location::kReclaimFrom, aarch32::Location::kReclaimFactor
template <>
inline int32_t InvalSet<INVAL_SET_TEMPLATE_PARAMETERS>::GetKey(
    const aarch32::Location::ForwardRef& element) {
  return element.GetLocation();
}
template <>
inline void InvalSet<INVAL_SET_TEMPLATE_PARAMETERS>::SetKey(
    aarch32::Location::ForwardRef* element, int32_t key) {
  element->SetLocationToInvalidateOnly(key);
}
#undef INVAL_SET_TEMPLATE_PARAMETERS

} // namespace vixl

#endif // VIXL_AARCH32_LABEL_AARCH32_H_
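For orientation, the classes declared above are normally driven through the
aarch32 MacroAssembler. A minimal usage sketch (illustrative only; it assumes
the standard VIXL aarch32 API and is not part of this commit):

#include "aarch32/macro-assembler-aarch32.h"

using namespace vixl::aarch32;

void Generate(MacroAssembler* masm) {
  Label loop;
  Literal<uint32_t> forty_two(42);  // A pool-managed 32-bit literal.
  masm->Ldr(r0, &forty_two);  // Creates a forward reference to the literal.
  masm->Bind(&loop);
  masm->Subs(r0, r0, 1);
  masm->B(ne, &loop);  // A backward reference to an already-bound label.
  masm->Bx(lr);
  masm->FinalizeCode();  // Places any literals still pending in pools.
}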
2312
externals/vixl/vixl/src/aarch32/macro-assembler-aarch32.cc
vendored
Normal file
File diff suppressed because it is too large
11189
externals/vixl/vixl/src/aarch32/macro-assembler-aarch32.h
vendored
Normal file
File diff suppressed because it is too large
563
externals/vixl/vixl/src/aarch32/operands-aarch32.cc
vendored
Normal file
@@ -0,0 +1,563 @@
// Copyright 2017, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may
//     be used to endorse or promote products derived from this software
//     without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.

extern "C" {
#include <inttypes.h>
#include <stdint.h>
}

#include <cassert>
#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <iomanip>
#include <iostream>

#include "utils-vixl.h"
#include "aarch32/constants-aarch32.h"
#include "aarch32/instructions-aarch32.h"
#include "aarch32/operands-aarch32.h"

namespace vixl {
namespace aarch32 {

// Operand

std::ostream& operator<<(std::ostream& os, const Operand& operand) {
  if (operand.IsImmediate()) {
    return os << "#" << operand.GetImmediate();
  }
  if (operand.IsImmediateShiftedRegister()) {
    if ((operand.GetShift().IsLSL() || operand.GetShift().IsROR()) &&
        (operand.GetShiftAmount() == 0)) {
      return os << operand.GetBaseRegister();
    }
    if (operand.GetShift().IsRRX()) {
      return os << operand.GetBaseRegister() << ", rrx";
    }
    return os << operand.GetBaseRegister() << ", " << operand.GetShift() << " #"
              << operand.GetShiftAmount();
  }
  if (operand.IsRegisterShiftedRegister()) {
    return os << operand.GetBaseRegister() << ", " << operand.GetShift() << " "
              << operand.GetShiftRegister();
  }
  VIXL_UNREACHABLE();
  return os;
}

std::ostream& operator<<(std::ostream& os, const NeonImmediate& neon_imm) {
  if (neon_imm.IsDouble()) {
    if (neon_imm.imm_.d_ == 0) {
      if (copysign(1.0, neon_imm.imm_.d_) < 0.0) {
        return os << "#-0.0";
      }
      return os << "#0.0";
    }
    return os << "#" << std::setprecision(9) << neon_imm.imm_.d_;
  }
  if (neon_imm.IsFloat()) {
    if (neon_imm.imm_.f_ == 0) {
      if (copysign(1.0, neon_imm.imm_.f_) < 0.0) return os << "#-0.0";
      return os << "#0.0";
    }
    return os << "#" << std::setprecision(9) << neon_imm.imm_.f_;
  }
  if (neon_imm.IsInteger64()) {
    return os << "#0x" << std::hex << std::setw(16) << std::setfill('0')
              << neon_imm.imm_.u64_ << std::dec;
  }
  return os << "#" << neon_imm.imm_.u32_;
}


// SOperand

std::ostream& operator<<(std::ostream& os, const SOperand& operand) {
  if (operand.IsImmediate()) {
    return os << operand.GetNeonImmediate();
  }
  return os << operand.GetRegister();
}

// DOperand

std::ostream& operator<<(std::ostream& os, const DOperand& operand) {
  if (operand.IsImmediate()) {
    return os << operand.GetNeonImmediate();
  }
  return os << operand.GetRegister();
}

// QOperand

std::ostream& operator<<(std::ostream& os, const QOperand& operand) {
  if (operand.IsImmediate()) {
    return os << operand.GetNeonImmediate();
  }
  return os << operand.GetRegister();
}


ImmediateVbic::ImmediateVbic(DataType dt, const NeonImmediate& neon_imm) {
  if (neon_imm.IsInteger32()) {
    uint32_t immediate = neon_imm.GetImmediate<uint32_t>();
    if (dt.GetValue() == I16) {
      if ((immediate & ~0xff) == 0) {
        SetEncodingValue(0x9);
        SetEncodedImmediate(immediate);
      } else if ((immediate & ~0xff00) == 0) {
        SetEncodingValue(0xb);
        SetEncodedImmediate(immediate >> 8);
      }
    } else if (dt.GetValue() == I32) {
      if ((immediate & ~0xff) == 0) {
        SetEncodingValue(0x1);
        SetEncodedImmediate(immediate);
      } else if ((immediate & ~0xff00) == 0) {
        SetEncodingValue(0x3);
        SetEncodedImmediate(immediate >> 8);
      } else if ((immediate & ~0xff0000) == 0) {
        SetEncodingValue(0x5);
        SetEncodedImmediate(immediate >> 16);
      } else if ((immediate & ~0xff000000) == 0) {
        SetEncodingValue(0x7);
        SetEncodedImmediate(immediate >> 24);
      }
    }
  }
}


DataType ImmediateVbic::DecodeDt(uint32_t cmode) {
  switch (cmode) {
    case 0x1:
    case 0x3:
    case 0x5:
    case 0x7:
      return I32;
    case 0x9:
    case 0xb:
      return I16;
    default:
      break;
  }
  VIXL_UNREACHABLE();
  return kDataTypeValueInvalid;
}


NeonImmediate ImmediateVbic::DecodeImmediate(uint32_t cmode,
                                             uint32_t immediate) {
  switch (cmode) {
    case 0x1:
    case 0x9:
      return immediate;
    case 0x3:
    case 0xb:
      return immediate << 8;
    case 0x5:
      return immediate << 16;
    case 0x7:
      return immediate << 24;
    default:
      break;
  }
  VIXL_UNREACHABLE();
  return 0;
}


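// For VMOV (immediate), the cmode encoding selects how the 8-bit payload is
// expanded: placed in one byte of each halfword (I16) or word (I32) lane,
// followed by one or two 0xff bytes (cmode 0xc/0xd), replicated to every
// byte (I8), taken as a VFP constant (F32), or, for I64 (cmode 0x1e),
// expanded bitwise so each payload bit becomes a 0x00 or 0xff byte. Worked
// example (illustrative): I64 0x00ff00ff00ff00ff encodes as payload 0x55.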
ImmediateVmov::ImmediateVmov(DataType dt, const NeonImmediate& neon_imm) {
  if (neon_imm.IsInteger()) {
    switch (dt.GetValue()) {
      case I8:
        if (neon_imm.CanConvert<uint8_t>()) {
          SetEncodingValue(0xe);
          SetEncodedImmediate(neon_imm.GetImmediate<uint8_t>());
        }
        break;
      case I16:
        if (neon_imm.IsInteger32()) {
          uint32_t immediate = neon_imm.GetImmediate<uint32_t>();
          if ((immediate & ~0xff) == 0) {
            SetEncodingValue(0x8);
            SetEncodedImmediate(immediate);
          } else if ((immediate & ~0xff00) == 0) {
            SetEncodingValue(0xa);
            SetEncodedImmediate(immediate >> 8);
          }
        }
        break;
      case I32:
        if (neon_imm.IsInteger32()) {
          uint32_t immediate = neon_imm.GetImmediate<uint32_t>();
          if ((immediate & ~0xff) == 0) {
            SetEncodingValue(0x0);
            SetEncodedImmediate(immediate);
          } else if ((immediate & ~0xff00) == 0) {
            SetEncodingValue(0x2);
            SetEncodedImmediate(immediate >> 8);
          } else if ((immediate & ~0xff0000) == 0) {
            SetEncodingValue(0x4);
            SetEncodedImmediate(immediate >> 16);
          } else if ((immediate & ~0xff000000) == 0) {
            SetEncodingValue(0x6);
            SetEncodedImmediate(immediate >> 24);
          } else if ((immediate & ~0xff00) == 0xff) {
            SetEncodingValue(0xc);
            SetEncodedImmediate(immediate >> 8);
          } else if ((immediate & ~0xff0000) == 0xffff) {
            SetEncodingValue(0xd);
            SetEncodedImmediate(immediate >> 16);
          }
        }
        break;
      case I64: {
        bool is_valid = true;
        uint32_t encoding = 0;
        if (neon_imm.IsInteger32()) {
          uint32_t immediate = neon_imm.GetImmediate<uint32_t>();
          uint32_t mask = 0xff000000;
          for (uint32_t set_bit = 1 << 3; set_bit != 0; set_bit >>= 1) {
            if ((immediate & mask) == mask) {
              encoding |= set_bit;
            } else if ((immediate & mask) != 0) {
              is_valid = false;
              break;
            }
            mask >>= 8;
          }
        } else {
          uint64_t immediate = neon_imm.GetImmediate<uint64_t>();
          uint64_t mask = UINT64_C(0xff) << 56;
          for (uint32_t set_bit = 1 << 7; set_bit != 0; set_bit >>= 1) {
            if ((immediate & mask) == mask) {
              encoding |= set_bit;
            } else if ((immediate & mask) != 0) {
              is_valid = false;
              break;
            }
            mask >>= 8;
          }
        }
        if (is_valid) {
          SetEncodingValue(0x1e);
          SetEncodedImmediate(encoding);
        }
        break;
      }
      default:
        break;
    }
  } else {
    switch (dt.GetValue()) {
      case F32:
        if (neon_imm.IsFloat() || neon_imm.IsDouble()) {
          ImmediateVFP vfp(neon_imm.GetImmediate<float>());
          if (vfp.IsValid()) {
            SetEncodingValue(0xf);
            SetEncodedImmediate(vfp.GetEncodingValue());
          }
        }
        break;
      default:
        break;
    }
  }
}


DataType ImmediateVmov::DecodeDt(uint32_t cmode) {
  switch (cmode & 0xf) {
    case 0x0:
    case 0x2:
    case 0x4:
    case 0x6:
    case 0xc:
    case 0xd:
      return I32;
    case 0x8:
    case 0xa:
      return I16;
    case 0xe:
      return ((cmode & 0x10) == 0) ? I8 : I64;
    case 0xf:
      if ((cmode & 0x10) == 0) return F32;
      break;
    default:
      break;
  }
  VIXL_UNREACHABLE();
  return kDataTypeValueInvalid;
}


NeonImmediate ImmediateVmov::DecodeImmediate(uint32_t cmode,
                                             uint32_t immediate) {
  switch (cmode & 0xf) {
    case 0x8:
    case 0x0:
      return immediate;
    case 0x2:
    case 0xa:
      return immediate << 8;
    case 0x4:
      return immediate << 16;
    case 0x6:
      return immediate << 24;
    case 0xc:
      return (immediate << 8) | 0xff;
    case 0xd:
      return (immediate << 16) | 0xffff;
    case 0xe: {
      if (cmode == 0x1e) {
        uint64_t encoding = 0;
        for (uint32_t set_bit = 1 << 7; set_bit != 0; set_bit >>= 1) {
          encoding <<= 8;
          if ((immediate & set_bit) != 0) {
            encoding |= 0xff;
          }
        }
        return encoding;
      } else {
        return immediate;
      }
    }
    case 0xf: {
      return ImmediateVFP::Decode<float>(immediate);
    }
    default:
      break;
  }
  VIXL_UNREACHABLE();
  return 0;
}


ImmediateVmvn::ImmediateVmvn(DataType dt, const NeonImmediate& neon_imm) {
  if (neon_imm.IsInteger32()) {
    uint32_t immediate = neon_imm.GetImmediate<uint32_t>();
    switch (dt.GetValue()) {
      case I16:
        if ((immediate & ~0xff) == 0) {
          SetEncodingValue(0x8);
          SetEncodedImmediate(immediate);
        } else if ((immediate & ~0xff00) == 0) {
          SetEncodingValue(0xa);
          SetEncodedImmediate(immediate >> 8);
        }
        break;
      case I32:
        if ((immediate & ~0xff) == 0) {
          SetEncodingValue(0x0);
          SetEncodedImmediate(immediate);
        } else if ((immediate & ~0xff00) == 0) {
          SetEncodingValue(0x2);
          SetEncodedImmediate(immediate >> 8);
        } else if ((immediate & ~0xff0000) == 0) {
          SetEncodingValue(0x4);
          SetEncodedImmediate(immediate >> 16);
        } else if ((immediate & ~0xff000000) == 0) {
          SetEncodingValue(0x6);
          SetEncodedImmediate(immediate >> 24);
        } else if ((immediate & ~0xff00) == 0xff) {
          SetEncodingValue(0xc);
          SetEncodedImmediate(immediate >> 8);
        } else if ((immediate & ~0xff0000) == 0xffff) {
          SetEncodingValue(0xd);
          SetEncodedImmediate(immediate >> 16);
        }
        break;
      default:
        break;
    }
  }
}


DataType ImmediateVmvn::DecodeDt(uint32_t cmode) {
  switch (cmode) {
    case 0x0:
    case 0x2:
    case 0x4:
    case 0x6:
    case 0xc:
    case 0xd:
      return I32;
    case 0x8:
    case 0xa:
      return I16;
    default:
      break;
  }
  VIXL_UNREACHABLE();
  return kDataTypeValueInvalid;
}


NeonImmediate ImmediateVmvn::DecodeImmediate(uint32_t cmode,
                                             uint32_t immediate) {
  switch (cmode) {
    case 0x0:
    case 0x8:
      return immediate;
    case 0x2:
    case 0xa:
      return immediate << 8;
    case 0x4:
      return immediate << 16;
    case 0x6:
      return immediate << 24;
    case 0xc:
      return (immediate << 8) | 0xff;
    case 0xd:
      return (immediate << 16) | 0xffff;
    default:
      break;
  }
  VIXL_UNREACHABLE();
  return 0;
}


ImmediateVorr::ImmediateVorr(DataType dt, const NeonImmediate& neon_imm) {
  if (neon_imm.IsInteger32()) {
    uint32_t immediate = neon_imm.GetImmediate<uint32_t>();
    if (dt.GetValue() == I16) {
      if ((immediate & ~0xff) == 0) {
        SetEncodingValue(0x9);
        SetEncodedImmediate(immediate);
      } else if ((immediate & ~0xff00) == 0) {
        SetEncodingValue(0xb);
        SetEncodedImmediate(immediate >> 8);
      }
    } else if (dt.GetValue() == I32) {
      if ((immediate & ~0xff) == 0) {
        SetEncodingValue(0x1);
        SetEncodedImmediate(immediate);
      } else if ((immediate & ~0xff00) == 0) {
        SetEncodingValue(0x3);
        SetEncodedImmediate(immediate >> 8);
      } else if ((immediate & ~0xff0000) == 0) {
        SetEncodingValue(0x5);
        SetEncodedImmediate(immediate >> 16);
      } else if ((immediate & ~0xff000000) == 0) {
        SetEncodingValue(0x7);
        SetEncodedImmediate(immediate >> 24);
      }
    }
  }
}


DataType ImmediateVorr::DecodeDt(uint32_t cmode) {
  switch (cmode) {
    case 0x1:
    case 0x3:
    case 0x5:
    case 0x7:
      return I32;
    case 0x9:
    case 0xb:
      return I16;
    default:
      break;
  }
  VIXL_UNREACHABLE();
  return kDataTypeValueInvalid;
}


NeonImmediate ImmediateVorr::DecodeImmediate(uint32_t cmode,
                                             uint32_t immediate) {
  switch (cmode) {
    case 0x1:
    case 0x9:
      return immediate;
    case 0x3:
    case 0xb:
      return immediate << 8;
    case 0x5:
      return immediate << 16;
    case 0x7:
      return immediate << 24;
    default:
      break;
  }
  VIXL_UNREACHABLE();
  return 0;
}

// MemOperand

std::ostream& operator<<(std::ostream& os, const MemOperand& operand) {
  os << "[" << operand.GetBaseRegister();
  if (operand.GetAddrMode() == PostIndex) {
    os << "]";
    if (operand.IsRegisterOnly()) return os << "!";
  }
  if (operand.IsImmediate()) {
    if ((operand.GetOffsetImmediate() != 0) || operand.GetSign().IsMinus() ||
        ((operand.GetAddrMode() != Offset) && !operand.IsRegisterOnly())) {
      if (operand.GetOffsetImmediate() == 0) {
        os << ", #" << operand.GetSign() << operand.GetOffsetImmediate();
      } else {
        os << ", #" << operand.GetOffsetImmediate();
      }
    }
  } else if (operand.IsPlainRegister()) {
    os << ", " << operand.GetSign() << operand.GetOffsetRegister();
  } else if (operand.IsShiftedRegister()) {
    os << ", " << operand.GetSign() << operand.GetOffsetRegister()
       << ImmediateShiftOperand(operand.GetShift(), operand.GetShiftAmount());
  } else {
    VIXL_UNREACHABLE();
    return os;
  }
  if (operand.GetAddrMode() == Offset) {
    os << "]";
  } else if (operand.GetAddrMode() == PreIndex) {
    os << "]!";
  }
  return os;
}

std::ostream& operator<<(std::ostream& os, const AlignedMemOperand& operand) {
  os << "[" << operand.GetBaseRegister() << operand.GetAlignment() << "]";
  if (operand.GetAddrMode() == PostIndex) {
    if (operand.IsPlainRegister()) {
      os << ", " << operand.GetOffsetRegister();
    } else {
      os << "!";
    }
  }
  return os;
}

} // namespace aarch32
} // namespace vixl
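The cmode encoders above are easiest to exercise as encode/decode round
trips. A small standalone sketch (hypothetical: the GetEncodingValue(),
GetEncodedImmediate() and IsValid() accessor names are assumptions, not
confirmed by this diff):

#include <cassert>

#include "aarch32/operands-aarch32.h"

using namespace vixl::aarch32;

int main() {
  // An I32 VMOV immediate with its payload in byte 1 (cmode 0x2).
  ImmediateVmov vmov(I32, NeonImmediate(0x0000ab00));
  assert(vmov.IsValid());  // Assumed accessor.
  NeonImmediate decoded =
      ImmediateVmov::DecodeImmediate(vmov.GetEncodingValue(),      // Assumed.
                                     vmov.GetEncodedImmediate());  // Assumed.
  assert(decoded.GetImmediate<uint32_t>() == 0x0000ab00);
  return 0;
}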
901
externals/vixl/vixl/src/aarch32/operands-aarch32.h
vendored
Normal file
@@ -0,0 +1,901 @@
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may
//     be used to endorse or promote products derived from this software
//     without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_AARCH32_OPERANDS_AARCH32_H_
|
||||
#define VIXL_AARCH32_OPERANDS_AARCH32_H_
|
||||
|
||||
#include "aarch32/instructions-aarch32.h"
|
||||
|
||||
namespace vixl {
|
||||
namespace aarch32 {
|
||||
|
||||
// Operand represents generic set of arguments to pass to an instruction.
|
||||
//
|
||||
// Usage: <instr> <Rd> , <Operand>
|
||||
//
|
||||
// where <instr> is the instruction to use (e.g., Mov(), Rsb(), etc.)
|
||||
// <Rd> is the destination register
|
||||
// <Operand> is the rest of the arguments to the instruction
|
||||
//
|
||||
// <Operand> can be one of:
|
||||
//
|
||||
// #<imm> - an unsigned 32-bit immediate value
|
||||
// <Rm>, <shift> <#amount> - immediate shifted register
|
||||
// <Rm>, <shift> <Rs> - register shifted register
|
||||
//
|
||||
class Operand {
 public:
  // { #<immediate> }
  // where <immediate> is uint32_t.
  // This is allowed to be an implicit constructor because Operand is
  // a wrapper class that doesn't normally perform any type conversion.
  Operand(uint32_t immediate)  // NOLINT(runtime/explicit)
      : imm_(immediate), rm_(NoReg), shift_(LSL), amount_(0), rs_(NoReg) {}
  Operand(int32_t immediate)  // NOLINT(runtime/explicit)
      : imm_(immediate), rm_(NoReg), shift_(LSL), amount_(0), rs_(NoReg) {}

  // rm
  // where rm is the base register.
  // This is allowed to be an implicit constructor because Operand is
  // a wrapper class that doesn't normally perform any type conversion.
  Operand(Register rm)  // NOLINT(runtime/explicit)
      : imm_(0), rm_(rm), shift_(LSL), amount_(0), rs_(NoReg) {
    VIXL_ASSERT(rm_.IsValid());
  }

  // rm, <shift>
  // where rm is the base register, and
  //       <shift> is RRX.
  Operand(Register rm, Shift shift)
      : imm_(0), rm_(rm), shift_(shift), amount_(0), rs_(NoReg) {
    VIXL_ASSERT(rm_.IsValid());
    VIXL_ASSERT(shift_.IsRRX());
  }

  // rm, <shift> #<amount>
  // where rm is the base register, and
  //       <shift> is one of {LSL, LSR, ASR, ROR}, and
  //       <amount> is uint6_t.
  Operand(Register rm, Shift shift, uint32_t amount)
      : imm_(0), rm_(rm), shift_(shift), amount_(amount), rs_(NoReg) {
    VIXL_ASSERT(rm_.IsValid());
    VIXL_ASSERT(!shift_.IsRRX());
#ifdef VIXL_DEBUG
    switch (shift_.GetType()) {
      case LSL:
        VIXL_ASSERT(amount_ <= 31);
        break;
      case ROR:
        VIXL_ASSERT(amount_ <= 31);
        break;
      case LSR:
      case ASR:
        VIXL_ASSERT(amount_ <= 32);
        break;
      case RRX:
      default:
        VIXL_UNREACHABLE();
        break;
    }
#endif
  }

  // rm, <shift> rs
  // where rm is the base register, and
  //       <shift> is one of {LSL, LSR, ASR, ROR}, and
  //       rs is the shift register.
  Operand(Register rm, Shift shift, Register rs)
      : imm_(0), rm_(rm), shift_(shift), amount_(0), rs_(rs) {
    VIXL_ASSERT(rm_.IsValid() && rs_.IsValid());
    VIXL_ASSERT(!shift_.IsRRX());
  }

  // Factory methods creating operands from any integral or pointer type. The
  // source must fit into 32 bits.
  template <typename T>
  static Operand From(T immediate) {
#if __cplusplus >= 201103L
    VIXL_STATIC_ASSERT_MESSAGE(std::is_integral<T>::value,
                               "An integral type is required to build an "
                               "immediate operand.");
#endif
    // Allow both a signed and an unsigned 32-bit integer to be passed, but
    // store it as a uint32_t. The signedness information will be lost. We
    // have to add a static_cast to make sure the compiler does not complain
    // about implicit 64 to 32 narrowing. It's perfectly acceptable for the
    // user to pass a 64-bit value, as long as it can be encoded in 32 bits.
    VIXL_ASSERT(IsInt32(immediate) || IsUint32(immediate));
    return Operand(static_cast<uint32_t>(immediate));
  }

  template <typename T>
  static Operand From(T* address) {
    uintptr_t address_as_integral = reinterpret_cast<uintptr_t>(address);
    VIXL_ASSERT(IsUint32(address_as_integral));
    return Operand(static_cast<uint32_t>(address_as_integral));
  }

  bool IsImmediate() const { return !rm_.IsValid(); }

  bool IsPlainRegister() const {
    return rm_.IsValid() && !shift_.IsRRX() && !rs_.IsValid() && (amount_ == 0);
  }

  bool IsImmediateShiftedRegister() const {
    return rm_.IsValid() && !rs_.IsValid();
  }

  bool IsRegisterShiftedRegister() const {
    return rm_.IsValid() && rs_.IsValid();
  }

  uint32_t GetImmediate() const {
    VIXL_ASSERT(IsImmediate());
    return imm_;
  }

  int32_t GetSignedImmediate() const {
    VIXL_ASSERT(IsImmediate());
    int32_t result;
    memcpy(&result, &imm_, sizeof(result));
    return result;
  }

  Register GetBaseRegister() const {
    VIXL_ASSERT(IsImmediateShiftedRegister() || IsRegisterShiftedRegister());
    return rm_;
  }

  Shift GetShift() const {
    VIXL_ASSERT(IsImmediateShiftedRegister() || IsRegisterShiftedRegister());
    return shift_;
  }

  uint32_t GetShiftAmount() const {
    VIXL_ASSERT(IsImmediateShiftedRegister());
    return amount_;
  }

  Register GetShiftRegister() const {
    VIXL_ASSERT(IsRegisterShiftedRegister());
    return rs_;
  }

  uint32_t GetTypeEncodingValue() const {
    return shift_.IsRRX() ? kRRXEncodedValue : shift_.GetValue();
  }

 private:
  // Forbid implicitly creating operands around types that cannot be encoded
  // into a uint32_t without loss.
#if __cplusplus >= 201103L
  Operand(int64_t) = delete;   // NOLINT(runtime/explicit)
  Operand(uint64_t) = delete;  // NOLINT(runtime/explicit)
  Operand(float) = delete;     // NOLINT(runtime/explicit)
  Operand(double) = delete;    // NOLINT(runtime/explicit)
#else
  VIXL_NO_RETURN_IN_DEBUG_MODE Operand(int64_t) {  // NOLINT(runtime/explicit)
    VIXL_UNREACHABLE();
  }
  VIXL_NO_RETURN_IN_DEBUG_MODE Operand(uint64_t) {  // NOLINT(runtime/explicit)
    VIXL_UNREACHABLE();
  }
  VIXL_NO_RETURN_IN_DEBUG_MODE Operand(float) {  // NOLINT
    VIXL_UNREACHABLE();
  }
  VIXL_NO_RETURN_IN_DEBUG_MODE Operand(double) {  // NOLINT
    VIXL_UNREACHABLE();
  }
#endif

  uint32_t imm_;
  Register rm_;
  Shift shift_;
  uint32_t amount_;
  Register rs_;
};

std::ostream& operator<<(std::ostream& os, const Operand& operand);

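// Illustrative usage sketch (not part of the upstream VIXL source): each
// constructor corresponds to one assembler operand form. Assuming a
// MacroAssembler bound to the usual `__` macro and the standard register
// aliases:
//
//   __ Mov(r0, Operand(0x12345678));   // mov r0, #<imm>
//   __ Mov(r0, Operand(r1));           // mov r0, r1
//   __ Mov(r0, Operand(r1, LSL, 4));   // mov r0, r1, lsl #4
//   __ Mov(r0, Operand(r1, LSR, r2));  // mov r0, r1, lsr r2
//   Operand::From(&some_global);       // hypothetical: 32-bit address as imm
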
class NeonImmediate {
  template <typename T>
  struct DataTypeIdentity {
    T data_type_;
  };

 public:
  // { #<immediate> }
  // where <immediate> is a 32-bit number.
  // This is allowed to be an implicit constructor because NeonImmediate is
  // a wrapper class that doesn't normally perform any type conversion.
  NeonImmediate(uint32_t immediate)  // NOLINT(runtime/explicit)
      : imm_(immediate), immediate_type_(I32) {}
  NeonImmediate(int immediate)  // NOLINT(runtime/explicit)
      : imm_(immediate), immediate_type_(I32) {}

  // { #<immediate> }
  // where <immediate> is a 64-bit number.
  // This is allowed to be an implicit constructor because NeonImmediate is
  // a wrapper class that doesn't normally perform any type conversion.
  NeonImmediate(int64_t immediate)  // NOLINT(runtime/explicit)
      : imm_(immediate), immediate_type_(I64) {}
  NeonImmediate(uint64_t immediate)  // NOLINT(runtime/explicit)
      : imm_(immediate), immediate_type_(I64) {}

  // { #<immediate> }
  // where <immediate> is a non-zero floating point number which can be
  // encoded as an 8-bit floating point value (checked by the constructor).
  // This is allowed to be an implicit constructor because NeonImmediate is
  // a wrapper class that doesn't normally perform any type conversion.
  NeonImmediate(float immediate)  // NOLINT(runtime/explicit)
      : imm_(immediate), immediate_type_(F32) {}
  NeonImmediate(double immediate)  // NOLINT(runtime/explicit)
      : imm_(immediate), immediate_type_(F64) {}

  NeonImmediate(const NeonImmediate& src)
      : imm_(src.imm_), immediate_type_(src.immediate_type_) {}

  template <typename T>
  T GetImmediate() const {
    return GetImmediate(DataTypeIdentity<T>());
  }

  template <typename T>
  T GetImmediate(const DataTypeIdentity<T>&) const {
    VIXL_ASSERT(sizeof(T) <= sizeof(uint32_t));
    VIXL_ASSERT(CanConvert<T>());
    if (immediate_type_.Is(I64))
      return static_cast<T>(imm_.u64_ & static_cast<T>(-1));
    if (immediate_type_.Is(F64) || immediate_type_.Is(F32)) return 0;
    return static_cast<T>(imm_.u32_ & static_cast<T>(-1));
  }

  uint64_t GetImmediate(const DataTypeIdentity<uint64_t>&) const {
    VIXL_ASSERT(CanConvert<uint64_t>());
    if (immediate_type_.Is(I32)) return imm_.u32_;
    if (immediate_type_.Is(F64) || immediate_type_.Is(F32)) return 0;
    return imm_.u64_;
  }
  float GetImmediate(const DataTypeIdentity<float>&) const {
    VIXL_ASSERT(CanConvert<float>());
    if (immediate_type_.Is(F64)) return static_cast<float>(imm_.d_);
    return imm_.f_;
  }
  double GetImmediate(const DataTypeIdentity<double>&) const {
    VIXL_ASSERT(CanConvert<double>());
    if (immediate_type_.Is(F32)) return static_cast<double>(imm_.f_);
    return imm_.d_;
  }

  bool IsInteger32() const { return immediate_type_.Is(I32); }
  bool IsInteger64() const { return immediate_type_.Is(I64); }
  bool IsInteger() const { return IsInteger32() || IsInteger64(); }
  bool IsFloat() const { return immediate_type_.Is(F32); }
  bool IsDouble() const { return immediate_type_.Is(F64); }
  bool IsFloatZero() const {
    if (immediate_type_.Is(F32)) return imm_.f_ == 0.0f;
    if (immediate_type_.Is(F64)) return imm_.d_ == 0.0;
    return false;
  }

  template <typename T>
  bool CanConvert() const {
    return CanConvert(DataTypeIdentity<T>());
  }

  template <typename T>
  bool CanConvert(const DataTypeIdentity<T>&) const {
    VIXL_ASSERT(sizeof(T) < sizeof(uint32_t));
    return (immediate_type_.Is(I32) && ((imm_.u32_ >> (8 * sizeof(T))) == 0)) ||
           (immediate_type_.Is(I64) && ((imm_.u64_ >> (8 * sizeof(T))) == 0)) ||
           (immediate_type_.Is(F32) && (imm_.f_ == 0.0f)) ||
           (immediate_type_.Is(F64) && (imm_.d_ == 0.0));
  }
  bool CanConvert(const DataTypeIdentity<uint32_t>&) const {
    return immediate_type_.Is(I32) ||
           (immediate_type_.Is(I64) && ((imm_.u64_ >> 32) == 0)) ||
           (immediate_type_.Is(F32) && (imm_.f_ == 0.0f)) ||
           (immediate_type_.Is(F64) && (imm_.d_ == 0.0));
  }
  bool CanConvert(const DataTypeIdentity<uint64_t>&) const {
    return IsInteger() || CanConvert<uint32_t>();
  }
  bool CanConvert(const DataTypeIdentity<float>&) const {
    return IsFloat() || IsDouble();
  }
  bool CanConvert(const DataTypeIdentity<double>&) const {
    return IsFloat() || IsDouble();
  }
  friend std::ostream& operator<<(std::ostream& os,
                                  const NeonImmediate& operand);

 private:
  union NeonImmediateType {
    uint64_t u64_;
    double d_;
    uint32_t u32_;
    float f_;
    NeonImmediateType(uint64_t u) : u64_(u) {}
    NeonImmediateType(int64_t u) : u64_(u) {}
    NeonImmediateType(uint32_t u) : u32_(u) {}
    NeonImmediateType(int32_t u) : u32_(u) {}
    NeonImmediateType(double d) : d_(d) {}
    NeonImmediateType(float f) : f_(f) {}
    NeonImmediateType(const NeonImmediateType& ref) : u64_(ref.u64_) {}
  } imm_;

  DataType immediate_type_;
};

std::ostream& operator<<(std::ostream& os, const NeonImmediate& operand);

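// Illustrative sketch (not part of the upstream VIXL source): NeonImmediate
// records the C++ type it was built from, and GetImmediate<T>() only narrows
// when CanConvert<T>() holds. For example:
//
//   NeonImmediate imm(UINT64_C(0x00000000000000ff));
//   imm.IsInteger64();            // true
//   imm.CanConvert<uint8_t>();    // true: high bits are all zero
//   imm.GetImmediate<uint8_t>();  // 0xff
//
//   NeonImmediate wide(UINT64_C(0x1234567800000000));
//   wide.CanConvert<uint32_t>();  // false: value does not fit in 32 bits
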
class NeonOperand {
 public:
  NeonOperand(int32_t immediate)  // NOLINT(runtime/explicit)
      : imm_(immediate), rm_(NoDReg) {}
  NeonOperand(uint32_t immediate)  // NOLINT(runtime/explicit)
      : imm_(immediate), rm_(NoDReg) {}
  NeonOperand(int64_t immediate)  // NOLINT(runtime/explicit)
      : imm_(immediate), rm_(NoDReg) {}
  NeonOperand(uint64_t immediate)  // NOLINT(runtime/explicit)
      : imm_(immediate), rm_(NoDReg) {}
  NeonOperand(float immediate)  // NOLINT(runtime/explicit)
      : imm_(immediate), rm_(NoDReg) {}
  NeonOperand(double immediate)  // NOLINT(runtime/explicit)
      : imm_(immediate), rm_(NoDReg) {}
  NeonOperand(const NeonImmediate& imm)  // NOLINT(runtime/explicit)
      : imm_(imm), rm_(NoDReg) {}
  NeonOperand(const VRegister& rm)  // NOLINT(runtime/explicit)
      : imm_(0), rm_(rm) {
    VIXL_ASSERT(rm_.IsValid());
  }

  bool IsImmediate() const { return !rm_.IsValid(); }
  bool IsRegister() const { return rm_.IsValid(); }
  bool IsFloatZero() const {
    VIXL_ASSERT(IsImmediate());
    return imm_.IsFloatZero();
  }

  const NeonImmediate& GetNeonImmediate() const { return imm_; }

  VRegister GetRegister() const {
    VIXL_ASSERT(IsRegister());
    return rm_;
  }

 protected:
  NeonImmediate imm_;
  VRegister rm_;
};

std::ostream& operator<<(std::ostream& os, const NeonOperand& operand);

// SOperand represents either an immediate or an SRegister.
class SOperand : public NeonOperand {
 public:
  // #<immediate>
  // where <immediate> is a 32-bit integer.
  // This is allowed to be an implicit constructor because SOperand is
  // a wrapper class that doesn't normally perform any type conversion.
  SOperand(int32_t immediate)  // NOLINT(runtime/explicit)
      : NeonOperand(immediate) {}
  SOperand(uint32_t immediate)  // NOLINT(runtime/explicit)
      : NeonOperand(immediate) {}
  // #<immediate>
  // where <immediate> is a 32-bit float.
  SOperand(float immediate)  // NOLINT(runtime/explicit)
      : NeonOperand(immediate) {}
  // where <immediate> is a 64-bit float.
  SOperand(double immediate)  // NOLINT(runtime/explicit)
      : NeonOperand(immediate) {}

  SOperand(const NeonImmediate& imm)  // NOLINT(runtime/explicit)
      : NeonOperand(imm) {}

  // rm
  // This is allowed to be an implicit constructor because SOperand is
  // a wrapper class that doesn't normally perform any type conversion.
  SOperand(SRegister rm)  // NOLINT(runtime/explicit)
      : NeonOperand(rm) {}
  SRegister GetRegister() const {
    VIXL_ASSERT(IsRegister() && (rm_.GetType() == CPURegister::kSRegister));
    return SRegister(rm_.GetCode());
  }
};

std::ostream& operator<<(std::ostream& os, const SOperand& operand);

// DOperand represents either an immediate or a DRegister.
class DOperand : public NeonOperand {
 public:
  // #<immediate>
  // where <immediate> is a 32-bit or 64-bit integer.
  // This is allowed to be an implicit constructor because DOperand is
  // a wrapper class that doesn't normally perform any type conversion.
  DOperand(int32_t immediate)  // NOLINT(runtime/explicit)
      : NeonOperand(immediate) {}
  DOperand(uint32_t immediate)  // NOLINT(runtime/explicit)
      : NeonOperand(immediate) {}
  DOperand(int64_t immediate)  // NOLINT(runtime/explicit)
      : NeonOperand(immediate) {}
  DOperand(uint64_t immediate)  // NOLINT(runtime/explicit)
      : NeonOperand(immediate) {}

  // #<immediate>
  // where <immediate> is a non-zero floating point number which can be
  // encoded as an 8-bit floating point value (checked by the constructor).
  // This is allowed to be an implicit constructor because DOperand is
  // a wrapper class that doesn't normally perform any type conversion.
  DOperand(float immediate)  // NOLINT(runtime/explicit)
      : NeonOperand(immediate) {}
  DOperand(double immediate)  // NOLINT(runtime/explicit)
      : NeonOperand(immediate) {}

  DOperand(const NeonImmediate& imm)  // NOLINT(runtime/explicit)
      : NeonOperand(imm) {}

  // rm
  // This is allowed to be an implicit constructor because DOperand is
  // a wrapper class that doesn't normally perform any type conversion.
  DOperand(DRegister rm)  // NOLINT(runtime/explicit)
      : NeonOperand(rm) {}

  DRegister GetRegister() const {
    VIXL_ASSERT(IsRegister() && (rm_.GetType() == CPURegister::kDRegister));
    return DRegister(rm_.GetCode());
  }
};

std::ostream& operator<<(std::ostream& os, const DOperand& operand);

// QOperand represents either an immediate or a QRegister.
class QOperand : public NeonOperand {
 public:
  // #<immediate>
  // where <immediate> is a 32-bit or 64-bit integer.
  // This is allowed to be an implicit constructor because QOperand is
  // a wrapper class that doesn't normally perform any type conversion.
  QOperand(int32_t immediate)  // NOLINT(runtime/explicit)
      : NeonOperand(immediate) {}
  QOperand(uint32_t immediate)  // NOLINT(runtime/explicit)
      : NeonOperand(immediate) {}
  QOperand(int64_t immediate)  // NOLINT(runtime/explicit)
      : NeonOperand(immediate) {}
  QOperand(uint64_t immediate)  // NOLINT(runtime/explicit)
      : NeonOperand(immediate) {}
  QOperand(float immediate)  // NOLINT(runtime/explicit)
      : NeonOperand(immediate) {}
  QOperand(double immediate)  // NOLINT(runtime/explicit)
      : NeonOperand(immediate) {}

  QOperand(const NeonImmediate& imm)  // NOLINT(runtime/explicit)
      : NeonOperand(imm) {}

  // rm
  // This is allowed to be an implicit constructor because QOperand is
  // a wrapper class that doesn't normally perform any type conversion.
  QOperand(QRegister rm)  // NOLINT(runtime/explicit)
      : NeonOperand(rm) {
    VIXL_ASSERT(rm_.IsValid());
  }

  QRegister GetRegister() const {
    VIXL_ASSERT(IsRegister() && (rm_.GetType() == CPURegister::kQRegister));
    return QRegister(rm_.GetCode());
  }
};

std::ostream& operator<<(std::ostream& os, const QOperand& operand);

class ImmediateVFP : public EncodingValue {
  template <typename T>
  struct FloatType {
    typedef T base_type;
  };

 public:
  explicit ImmediateVFP(const NeonImmediate& neon_imm) {
    if (neon_imm.IsFloat()) {
      const float imm = neon_imm.GetImmediate<float>();
      if (VFP::IsImmFP32(imm)) {
        SetEncodingValue(VFP::FP32ToImm8(imm));
      }
    } else if (neon_imm.IsDouble()) {
      const double imm = neon_imm.GetImmediate<double>();
      if (VFP::IsImmFP64(imm)) {
        SetEncodingValue(VFP::FP64ToImm8(imm));
      }
    }
  }

  template <typename T>
  static T Decode(uint32_t v) {
    return Decode(v, FloatType<T>());
  }

  static float Decode(uint32_t imm8, const FloatType<float>&) {
    return VFP::Imm8ToFP32(imm8);
  }

  static double Decode(uint32_t imm8, const FloatType<double>&) {
    return VFP::Imm8ToFP64(imm8);
  }
};

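// Illustrative note (not part of the upstream VIXL source): the VFP
// "modified immediate" encoding packs a sign bit, a 3-bit exponent and a
// 4-bit fraction into imm8, so only values such as +/-0.25, 1.0, 2.5 or 31.0
// are representable. A round-trip sketch, assuming the EncodingValue
// accessors IsValid() and GetEncodingValue():
//
//   ImmediateVFP enc(NeonImmediate(1.0f));
//   if (enc.IsValid()) {
//     float decoded = ImmediateVFP::Decode<float>(enc.GetEncodingValue());
//     // decoded == 1.0f
//   }
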
class ImmediateVbic : public EncodingValueAndImmediate {
 public:
  ImmediateVbic(DataType dt, const NeonImmediate& neon_imm);
  static DataType DecodeDt(uint32_t cmode);
  static NeonImmediate DecodeImmediate(uint32_t cmode, uint32_t immediate);
};

class ImmediateVand : public ImmediateVbic {
 public:
  ImmediateVand(DataType dt, const NeonImmediate& neon_imm)
      : ImmediateVbic(dt, neon_imm) {
    if (IsValid()) {
      SetEncodedImmediate(~GetEncodedImmediate() & 0xff);
    }
  }
};

class ImmediateVmov : public EncodingValueAndImmediate {
 public:
  ImmediateVmov(DataType dt, const NeonImmediate& neon_imm);
  static DataType DecodeDt(uint32_t cmode);
  static NeonImmediate DecodeImmediate(uint32_t cmode, uint32_t immediate);
};

class ImmediateVmvn : public EncodingValueAndImmediate {
 public:
  ImmediateVmvn(DataType dt, const NeonImmediate& neon_imm);
  static DataType DecodeDt(uint32_t cmode);
  static NeonImmediate DecodeImmediate(uint32_t cmode, uint32_t immediate);
};

class ImmediateVorr : public EncodingValueAndImmediate {
 public:
  ImmediateVorr(DataType dt, const NeonImmediate& neon_imm);
  static DataType DecodeDt(uint32_t cmode);
  static NeonImmediate DecodeImmediate(uint32_t cmode, uint32_t immediate);
};

class ImmediateVorn : public ImmediateVorr {
 public:
  ImmediateVorn(DataType dt, const NeonImmediate& neon_imm)
      : ImmediateVorr(dt, neon_imm) {
    if (IsValid()) {
      SetEncodedImmediate(~GetEncodedImmediate() & 0xff);
    }
  }
};

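// Illustrative note (not part of the upstream VIXL source): VAND and VORN
// have no immediate encodings of their own, so they are expressed through
// their bitwise duals. Since (a & imm) == BIC(a, ~imm), ImmediateVand reuses
// the VBIC encoding with the 8-bit immediate complemented, and ImmediateVorn
// does the same with VORR.
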
// MemOperand represents the addressing mode of a load or store instruction.
//
//   Usage: <instr> <Rt>, <MemOperand>
//
//   where <instr> is the instruction to use (e.g., Ldr(), Str(), etc.),
//         <Rt> is the general purpose register to be transferred, and
//         <MemOperand> is the rest of the arguments to the instruction.
//
//   <MemOperand> can be in one of 3 addressing modes:
//
//     [ <Rn>, <offset> ]   == offset addressing
//     [ <Rn>, <offset> ]!  == pre-indexed addressing
//     [ <Rn> ], <offset>   == post-indexed addressing
//
//   where <offset> can be one of:
//     - an immediate constant, such as <imm8>, <imm12>
//     - an index register <Rm>
//     - a shifted index register <Rm>, <shift> #<amount>
//
//   The index register may have an associated {+/-} sign,
//   which, if omitted, defaults to +.
//
//   We have two constructors for the offset:
//
//   One with a signed value offset parameter. The value of sign_ is
//   "sign_of(constructor's offset parameter)" and the value of offset_ is
//   "constructor's offset parameter".
//
//   The other with a sign and a positive value offset parameters. The value
//   of sign_ is "constructor's sign parameter" and the value of offset_ is
//   "constructor's sign parameter * constructor's offset parameter".
//
//   The value of offset_ reflects the effective offset. For an offset_ of 0,
//   sign_ can be positive or negative. Otherwise, sign_ always agrees with
//   the sign of offset_.
class MemOperand {
 public:
  // rn
  // where rn is the general purpose base register only
  explicit MemOperand(Register rn, AddrMode addrmode = Offset)
      : rn_(rn),
        offset_(0),
        sign_(plus),
        rm_(NoReg),
        shift_(LSL),
        shift_amount_(0),
        addrmode_(addrmode | kMemOperandRegisterOnly) {
    VIXL_ASSERT(rn_.IsValid());
  }

  // rn, #<imm>
  // where rn is the general purpose base register,
  //       <imm> is a 32-bit offset to add to rn
  //
  // Note: if rn is PC, then this form is equivalent to a "label".
  // Note: the second constructor allows minus zero (-0).
  MemOperand(Register rn, int32_t offset, AddrMode addrmode = Offset)
      : rn_(rn),
        offset_(offset),
        sign_((offset < 0) ? minus : plus),
        rm_(NoReg),
        shift_(LSL),
        shift_amount_(0),
        addrmode_(addrmode) {
    VIXL_ASSERT(rn_.IsValid());
  }
  MemOperand(Register rn, Sign sign, int32_t offset, AddrMode addrmode = Offset)
      : rn_(rn),
        offset_(sign.IsPlus() ? offset : -offset),
        sign_(sign),
        rm_(NoReg),
        shift_(LSL),
        shift_amount_(0),
        addrmode_(addrmode) {
    VIXL_ASSERT(rn_.IsValid());
    // With this constructor, the sign must only be specified by "sign".
    VIXL_ASSERT(offset >= 0);
  }

  // rn, {+/-}rm
  // where rn is the general purpose base register,
  //       {+/-} is the sign of the index register,
  //       rm is the general purpose index register
  MemOperand(Register rn, Sign sign, Register rm, AddrMode addrmode = Offset)
      : rn_(rn),
        offset_(0),
        sign_(sign),
        rm_(rm),
        shift_(LSL),
        shift_amount_(0),
        addrmode_(addrmode) {
    VIXL_ASSERT(rn_.IsValid() && rm_.IsValid());
  }

  // rn, rm
  // where rn is the general purpose base register,
  //       rm is the general purpose index register
  MemOperand(Register rn, Register rm, AddrMode addrmode = Offset)
      : rn_(rn),
        offset_(0),
        sign_(plus),
        rm_(rm),
        shift_(LSL),
        shift_amount_(0),
        addrmode_(addrmode) {
    VIXL_ASSERT(rn_.IsValid() && rm_.IsValid());
  }

  // rn, {+/-}rm, <shift>
  // where rn is the general purpose base register,
  //       {+/-} is the sign of the index register,
  //       rm is the general purpose index register,
  //       <shift> is RRX, applied to the value from rm
  MemOperand(Register rn,
             Sign sign,
             Register rm,
             Shift shift,
             AddrMode addrmode = Offset)
      : rn_(rn),
        offset_(0),
        sign_(sign),
        rm_(rm),
        shift_(shift),
        shift_amount_(0),
        addrmode_(addrmode) {
    VIXL_ASSERT(rn_.IsValid() && rm_.IsValid());
    VIXL_ASSERT(shift_.IsRRX());
  }

  // rn, rm, <shift>
  // where rn is the general purpose base register,
  //       rm is the general purpose index register,
  //       <shift> is RRX, applied to the value from rm
  MemOperand(Register rn, Register rm, Shift shift, AddrMode addrmode = Offset)
      : rn_(rn),
        offset_(0),
        sign_(plus),
        rm_(rm),
        shift_(shift),
        shift_amount_(0),
        addrmode_(addrmode) {
    VIXL_ASSERT(rn_.IsValid() && rm_.IsValid());
    VIXL_ASSERT(shift_.IsRRX());
  }

  // rn, {+/-}rm, <shift> #<amount>
  // where rn is the general purpose base register,
  //       {+/-} is the sign of the index register,
  //       rm is the general purpose index register,
  //       <shift> is one of {LSL, LSR, ASR, ROR}, applied to the value from rm,
  //       <amount> is the shift amount applied to the value from rm
  MemOperand(Register rn,
             Sign sign,
             Register rm,
             Shift shift,
             uint32_t shift_amount,
             AddrMode addrmode = Offset)
      : rn_(rn),
        offset_(0),
        sign_(sign),
        rm_(rm),
        shift_(shift),
        shift_amount_(shift_amount),
        addrmode_(addrmode) {
    VIXL_ASSERT(rn_.IsValid() && rm_.IsValid());
    CheckShift();
  }

  // rn, rm, <shift> #<amount>
  // where rn is the general purpose base register,
  //       rm is the general purpose index register,
  //       <shift> is one of {LSL, LSR, ASR, ROR}, applied to the value from rm,
  //       <amount> is the shift amount applied to the value from rm
  MemOperand(Register rn,
             Register rm,
             Shift shift,
             uint32_t shift_amount,
             AddrMode addrmode = Offset)
      : rn_(rn),
        offset_(0),
        sign_(plus),
        rm_(rm),
        shift_(shift),
        shift_amount_(shift_amount),
        addrmode_(addrmode) {
    VIXL_ASSERT(rn_.IsValid() && rm_.IsValid());
    CheckShift();
  }

  Register GetBaseRegister() const { return rn_; }
  int32_t GetOffsetImmediate() const { return offset_; }
  bool IsOffsetImmediateWithinRange(int min,
                                    int max,
                                    int multiple_of = 1) const {
    return (offset_ >= min) && (offset_ <= max) &&
           ((offset_ % multiple_of) == 0);
  }
  Sign GetSign() const { return sign_; }
  Register GetOffsetRegister() const { return rm_; }
  Shift GetShift() const { return shift_; }
  unsigned GetShiftAmount() const { return shift_amount_; }
  AddrMode GetAddrMode() const {
    return static_cast<AddrMode>(addrmode_ & kMemOperandAddrModeMask);
  }
  bool IsRegisterOnly() const {
    return (addrmode_ & kMemOperandRegisterOnly) != 0;
  }

  bool IsImmediate() const { return !rm_.IsValid(); }
  bool IsImmediateZero() const { return !rm_.IsValid() && (offset_ == 0); }
  bool IsPlainRegister() const {
    return rm_.IsValid() && shift_.IsLSL() && (shift_amount_ == 0);
  }
  bool IsShiftedRegister() const { return rm_.IsValid(); }
  bool IsImmediateOffset() const {
    return (GetAddrMode() == Offset) && !rm_.IsValid();
  }
  bool IsImmediateZeroOffset() const {
    return (GetAddrMode() == Offset) && !rm_.IsValid() && (offset_ == 0);
  }
  bool IsRegisterOffset() const {
    return (GetAddrMode() == Offset) && rm_.IsValid() && shift_.IsLSL() &&
           (shift_amount_ == 0);
  }
  bool IsShiftedRegisterOffset() const {
    return (GetAddrMode() == Offset) && rm_.IsValid();
  }
  uint32_t GetTypeEncodingValue() const {
    return shift_.IsRRX() ? kRRXEncodedValue : shift_.GetValue();
  }
  bool IsOffset() const { return GetAddrMode() == Offset; }
  bool IsPreIndex() const { return GetAddrMode() == PreIndex; }
  bool IsPostIndex() const { return GetAddrMode() == PostIndex; }
  bool IsShiftValid() const { return shift_.IsValidAmount(shift_amount_); }

 private:
  static const int kMemOperandRegisterOnly = 0x1000;
  static const int kMemOperandAddrModeMask = 0xfff;
  void CheckShift() {
#ifdef VIXL_DEBUG
    // Disallow any zero shift other than RRX #0 and LSL #0.
    if ((shift_amount_ == 0) && shift_.IsRRX()) return;
    if ((shift_amount_ == 0) && !shift_.IsLSL()) {
      VIXL_ABORT_WITH_MSG(
          "A shift by 0 is only accepted in "
          "the case of lsl and will be treated as "
          "no shift.\n");
    }
    switch (shift_.GetType()) {
      case LSL:
        VIXL_ASSERT(shift_amount_ <= 31);
        break;
      case ROR:
        VIXL_ASSERT(shift_amount_ <= 31);
        break;
      case LSR:
      case ASR:
        VIXL_ASSERT(shift_amount_ <= 32);
        break;
      case RRX:
      default:
        VIXL_UNREACHABLE();
        break;
    }
#endif
  }
  Register rn_;
  int32_t offset_;
  Sign sign_;
  Register rm_;
  Shift shift_;
  uint32_t shift_amount_;
  uint32_t addrmode_;
};

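// Illustrative usage sketch (not part of the upstream VIXL source), mapping
// the three addressing modes onto the constructors above:
//
//   __ Ldr(r0, MemOperand(r1, 8));             // ldr r0, [r1, #8]
//   __ Ldr(r0, MemOperand(r1, 8, PreIndex));   // ldr r0, [r1, #8]!
//   __ Ldr(r0, MemOperand(r1, 8, PostIndex));  // ldr r0, [r1], #8
//   __ Ldr(r0, MemOperand(r1, minus, r2));     // ldr r0, [r1, -r2]
//   __ Ldr(r0, MemOperand(r1, r2, LSL, 2));    // ldr r0, [r1, r2, lsl #2]
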
std::ostream& operator<<(std::ostream& os, const MemOperand& operand);

class AlignedMemOperand : public MemOperand {
 public:
  AlignedMemOperand(Register rn, Alignment align, AddrMode addrmode = Offset)
      : MemOperand(rn, addrmode), align_(align) {
    VIXL_ASSERT(addrmode != PreIndex);
  }

  AlignedMemOperand(Register rn,
                    Alignment align,
                    Register rm,
                    AddrMode addrmode)
      : MemOperand(rn, rm, addrmode), align_(align) {
    VIXL_ASSERT(addrmode != PreIndex);
  }

  Alignment GetAlignment() const { return align_; }

 private:
  Alignment align_;
};

std::ostream& operator<<(std::ostream& os, const AlignedMemOperand& operand);

}  // namespace aarch32
}  // namespace vixl

#endif  // VIXL_AARCH32_OPERANDS_AARCH32_H_
167
externals/vixl/vixl/src/aarch64/abi-aarch64.h
vendored
Normal file

@@ -0,0 +1,167 @@
// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may
//     be used to endorse or promote products derived from this software
//     without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.

// The ABI features are only supported with C++11 or later.
#if __cplusplus >= 201103L
// This should not be defined manually.
#define VIXL_HAS_ABI_SUPPORT
#elif defined(VIXL_HAS_ABI_SUPPORT)
#error "The ABI support requires C++11 or later."
#endif

#ifdef VIXL_HAS_ABI_SUPPORT

#ifndef VIXL_AARCH64_ABI_AARCH64_H_
#define VIXL_AARCH64_ABI_AARCH64_H_

#include <algorithm>
#include <type_traits>

#include "../globals-vixl.h"

#include "instructions-aarch64.h"
#include "operands-aarch64.h"

namespace vixl {
namespace aarch64 {

// Class describing the AArch64 procedure call standard, as defined in "ARM
// Procedure Call Standard for the ARM 64-bit Architecture (AArch64)",
// release 1.0 (AAPCS below).
//
// The stages in the comments match the description in that document.
//
// Stage B does not apply to arguments handled by this class.
class ABI {
 public:
  explicit ABI(Register stack_pointer = sp) : stack_pointer_(stack_pointer) {
    // Stage A - Initialization
    Reset();
  }

  void Reset() {
    NGRN_ = 0;
    NSRN_ = 0;
    stack_offset_ = 0;
  }

  int GetStackSpaceRequired() { return stack_offset_; }

  // The logic is described in section 5.5 of the AAPCS.
  template <typename T>
  GenericOperand GetReturnGenericOperand() const {
    ABI abi(stack_pointer_);
    GenericOperand result = abi.GetNextParameterGenericOperand<T>();
    VIXL_ASSERT(result.IsCPURegister());
    return result;
  }

  // The logic is described in section 5.4.2 of the AAPCS.
  // The `GenericOperand` returned describes the location reserved for the
  // argument from the point of view of the callee.
  template <typename T>
  GenericOperand GetNextParameterGenericOperand() {
    const bool is_floating_point_type = std::is_floating_point<T>::value;
    const bool is_integral_type =
        std::is_integral<T>::value || std::is_enum<T>::value;
    const bool is_pointer_type = std::is_pointer<T>::value;
    int type_alignment = std::alignment_of<T>::value;

    // We only support basic types.
    VIXL_ASSERT(is_floating_point_type || is_integral_type || is_pointer_type);

    // To ensure we get the correct type of operand when simulating on a
    // 32-bit host, force the size of pointer types to the native AArch64
    // pointer size.
    unsigned size = is_pointer_type ? 8 : sizeof(T);
    // The size of the 'operand' reserved for the argument.
    unsigned operand_size = AlignUp(size, kWRegSizeInBytes);
    if (size > 8) {
      VIXL_UNIMPLEMENTED();
      return GenericOperand();
    }

    // Stage C.1
    if (is_floating_point_type && (NSRN_ < 8)) {
      return GenericOperand(VRegister(NSRN_++, size * kBitsPerByte));
    }
    // Stages C.2, C.3, and C.4: Unsupported. Caught by the assertions above.
    // Stages C.5 and C.6
    if (is_floating_point_type) {
      VIXL_STATIC_ASSERT(
          !is_floating_point_type ||
          (std::is_same<T, float>::value || std::is_same<T, double>::value));
      int offset = stack_offset_;
      stack_offset_ += 8;
      return GenericOperand(MemOperand(stack_pointer_, offset), operand_size);
    }
    // Stage C.7
    if ((is_integral_type || is_pointer_type) && (size <= 8) && (NGRN_ < 8)) {
      return GenericOperand(Register(NGRN_++, operand_size * kBitsPerByte));
    }
    // Stage C.8
    if (type_alignment == 16) {
      NGRN_ = AlignUp(NGRN_, 2);
    }
    // Stage C.9
    if (is_integral_type && (size == 16) && (NGRN_ < 7)) {
      VIXL_UNIMPLEMENTED();
      return GenericOperand();
    }
    // Stage C.10: Unsupported. Caught by the assertions above.
    // Stage C.11
    NGRN_ = 8;
    // Stage C.12
    stack_offset_ = AlignUp(stack_offset_, std::max(type_alignment, 8));
    // Stage C.13: Unsupported. Caught by the assertions above.
    // Stage C.14
    VIXL_ASSERT(size <= 8u);
    size = std::max(size, 8u);
    int offset = stack_offset_;
    stack_offset_ += size;
    return GenericOperand(MemOperand(stack_pointer_, offset), operand_size);
  }

 private:
  Register stack_pointer_;
  // Next General-purpose Register Number.
  int NGRN_;
  // Next SIMD and Floating-point Register Number.
  int NSRN_;
  // The acronym "NSAA" used in the standard refers to the "Next Stacked
  // Argument Address". Here we deal with offsets from the stack pointer.
  int stack_offset_;
};

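// Illustrative usage sketch (not part of the upstream VIXL source): walking
// the parameter list of `double f(int, double, int64_t)` yields the AAPCS64
// locations of each argument in turn:
//
//   ABI abi;
//   abi.GetNextParameterGenericOperand<int>();      // w0
//   abi.GetNextParameterGenericOperand<double>();   // d0
//   abi.GetNextParameterGenericOperand<int64_t>();  // x1
//   abi.GetReturnGenericOperand<double>();          // d0
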
template <>
inline GenericOperand ABI::GetReturnGenericOperand<void>() const {
  return GenericOperand();
}

}  // namespace aarch64
}  // namespace vixl

#endif  // VIXL_AARCH64_ABI_AARCH64_H_

#endif  // VIXL_HAS_ABI_SUPPORT
6279
externals/vixl/vixl/src/aarch64/assembler-aarch64.cc
vendored
Normal file
File diff suppressed because it is too large
7073
externals/vixl/vixl/src/aarch64/assembler-aarch64.h
vendored
Normal file
File diff suppressed because it is too large
6489
externals/vixl/vixl/src/aarch64/assembler-sve-aarch64.cc
vendored
Normal file
File diff suppressed because it is too large
4439
externals/vixl/vixl/src/aarch64/constants-aarch64.h
vendored
Normal file
File diff suppressed because it is too large
489
externals/vixl/vixl/src/aarch64/cpu-aarch64.cc
vendored
Normal file

@@ -0,0 +1,489 @@
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright
//     notice, this list of conditions and the following disclaimer in the
//     documentation and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may
//     be used to endorse or promote products derived from this software
//     without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.

#if defined(__aarch64__) && (defined(__ANDROID__) || defined(__linux__))
#include <sys/auxv.h>
#define VIXL_USE_LINUX_HWCAP 1
#endif

#include "../utils-vixl.h"

#include "cpu-aarch64.h"

namespace vixl {
namespace aarch64 {

const IDRegister::Field AA64PFR0::kFP(16, Field::kSigned);
const IDRegister::Field AA64PFR0::kAdvSIMD(20, Field::kSigned);
const IDRegister::Field AA64PFR0::kRAS(28);
const IDRegister::Field AA64PFR0::kSVE(32);
const IDRegister::Field AA64PFR0::kDIT(48);
const IDRegister::Field AA64PFR0::kCSV2(56);
const IDRegister::Field AA64PFR0::kCSV3(60);

const IDRegister::Field AA64PFR1::kBT(0);
const IDRegister::Field AA64PFR1::kSSBS(4);
const IDRegister::Field AA64PFR1::kMTE(8);

const IDRegister::Field AA64ISAR0::kAES(4);
const IDRegister::Field AA64ISAR0::kSHA1(8);
const IDRegister::Field AA64ISAR0::kSHA2(12);
const IDRegister::Field AA64ISAR0::kCRC32(16);
const IDRegister::Field AA64ISAR0::kAtomic(20);
const IDRegister::Field AA64ISAR0::kRDM(28);
const IDRegister::Field AA64ISAR0::kSHA3(32);
const IDRegister::Field AA64ISAR0::kSM3(36);
const IDRegister::Field AA64ISAR0::kSM4(40);
const IDRegister::Field AA64ISAR0::kDP(44);
const IDRegister::Field AA64ISAR0::kFHM(48);
const IDRegister::Field AA64ISAR0::kTS(52);
const IDRegister::Field AA64ISAR0::kRNDR(60);

const IDRegister::Field AA64ISAR1::kDPB(0);
const IDRegister::Field AA64ISAR1::kAPA(4);
const IDRegister::Field AA64ISAR1::kAPI(8);
const IDRegister::Field AA64ISAR1::kJSCVT(12);
const IDRegister::Field AA64ISAR1::kFCMA(16);
const IDRegister::Field AA64ISAR1::kLRCPC(20);
const IDRegister::Field AA64ISAR1::kGPA(24);
const IDRegister::Field AA64ISAR1::kGPI(28);
const IDRegister::Field AA64ISAR1::kFRINTTS(32);
const IDRegister::Field AA64ISAR1::kSB(36);
const IDRegister::Field AA64ISAR1::kSPECRES(40);
const IDRegister::Field AA64ISAR1::kBF16(44);
const IDRegister::Field AA64ISAR1::kDGH(48);
const IDRegister::Field AA64ISAR1::kI8MM(52);

const IDRegister::Field AA64MMFR1::kLO(16);

const IDRegister::Field AA64MMFR2::kAT(32);

const IDRegister::Field AA64ZFR0::kBF16(20);
const IDRegister::Field AA64ZFR0::kI8MM(44);
const IDRegister::Field AA64ZFR0::kF32MM(52);
const IDRegister::Field AA64ZFR0::kF64MM(56);

CPUFeatures AA64PFR0::GetCPUFeatures() const {
  CPUFeatures f;
  if (Get(kFP) >= 0) f.Combine(CPUFeatures::kFP);
  if (Get(kFP) >= 1) f.Combine(CPUFeatures::kFPHalf);
  if (Get(kAdvSIMD) >= 0) f.Combine(CPUFeatures::kNEON);
  if (Get(kAdvSIMD) >= 1) f.Combine(CPUFeatures::kNEONHalf);
  if (Get(kRAS) >= 1) f.Combine(CPUFeatures::kRAS);
  if (Get(kSVE) >= 1) f.Combine(CPUFeatures::kSVE);
  if (Get(kDIT) >= 1) f.Combine(CPUFeatures::kDIT);
  if (Get(kCSV2) >= 1) f.Combine(CPUFeatures::kCSV2);
  if (Get(kCSV2) >= 2) f.Combine(CPUFeatures::kSCXTNUM);
  if (Get(kCSV3) >= 1) f.Combine(CPUFeatures::kCSV3);
  return f;
}

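// Illustrative note (not part of the upstream VIXL source): kFP and kAdvSIMD
// are declared as signed fields above because the architecture encodes "not
// implemented" for them as the sign-extended value -1 (0b1111). That is why
// the tests read `>= 0` for the base feature and `>= 1` for the
// half-precision extension, while unsigned fields start at `>= 1`.
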
CPUFeatures AA64PFR1::GetCPUFeatures() const {
  CPUFeatures f;
  if (Get(kBT) >= 1) f.Combine(CPUFeatures::kBTI);
  if (Get(kSSBS) >= 1) f.Combine(CPUFeatures::kSSBS);
  if (Get(kSSBS) >= 2) f.Combine(CPUFeatures::kSSBSControl);
  if (Get(kMTE) >= 1) f.Combine(CPUFeatures::kMTEInstructions);
  if (Get(kMTE) >= 2) f.Combine(CPUFeatures::kMTE);
  return f;
}

CPUFeatures AA64ISAR0::GetCPUFeatures() const {
  CPUFeatures f;
  if (Get(kAES) >= 1) f.Combine(CPUFeatures::kAES);
  if (Get(kAES) >= 2) f.Combine(CPUFeatures::kPmull1Q);
  if (Get(kSHA1) >= 1) f.Combine(CPUFeatures::kSHA1);
  if (Get(kSHA2) >= 1) f.Combine(CPUFeatures::kSHA2);
  if (Get(kSHA2) >= 2) f.Combine(CPUFeatures::kSHA512);
  if (Get(kCRC32) >= 1) f.Combine(CPUFeatures::kCRC32);
  if (Get(kAtomic) >= 1) f.Combine(CPUFeatures::kAtomics);
  if (Get(kRDM) >= 1) f.Combine(CPUFeatures::kRDM);
  if (Get(kSHA3) >= 1) f.Combine(CPUFeatures::kSHA3);
  if (Get(kSM3) >= 1) f.Combine(CPUFeatures::kSM3);
  if (Get(kSM4) >= 1) f.Combine(CPUFeatures::kSM4);
  if (Get(kDP) >= 1) f.Combine(CPUFeatures::kDotProduct);
  if (Get(kFHM) >= 1) f.Combine(CPUFeatures::kFHM);
  if (Get(kTS) >= 1) f.Combine(CPUFeatures::kFlagM);
  if (Get(kTS) >= 2) f.Combine(CPUFeatures::kAXFlag);
  if (Get(kRNDR) >= 1) f.Combine(CPUFeatures::kRNG);
  return f;
}

CPUFeatures AA64ISAR1::GetCPUFeatures() const {
  CPUFeatures f;
  if (Get(kDPB) >= 1) f.Combine(CPUFeatures::kDCPoP);
  if (Get(kDPB) >= 2) f.Combine(CPUFeatures::kDCCVADP);
  if (Get(kJSCVT) >= 1) f.Combine(CPUFeatures::kJSCVT);
  if (Get(kFCMA) >= 1) f.Combine(CPUFeatures::kFcma);
  if (Get(kLRCPC) >= 1) f.Combine(CPUFeatures::kRCpc);
  if (Get(kLRCPC) >= 2) f.Combine(CPUFeatures::kRCpcImm);
  if (Get(kFRINTTS) >= 1) f.Combine(CPUFeatures::kFrintToFixedSizedInt);
  if (Get(kSB) >= 1) f.Combine(CPUFeatures::kSB);
  if (Get(kSPECRES) >= 1) f.Combine(CPUFeatures::kSPECRES);
  if (Get(kBF16) >= 1) f.Combine(CPUFeatures::kBF16);
  if (Get(kDGH) >= 1) f.Combine(CPUFeatures::kDGH);
  if (Get(kI8MM) >= 1) f.Combine(CPUFeatures::kI8MM);

  // Only one of these fields should be non-zero, but they have the same
  // encodings, so merge the logic.
  int apx = std::max(Get(kAPI), Get(kAPA));
  if (apx >= 1) {
    f.Combine(CPUFeatures::kPAuth);
    // APA (rather than API) indicates QARMA.
    if (Get(kAPA) >= 1) f.Combine(CPUFeatures::kPAuthQARMA);
    if (apx == 0b0010) f.Combine(CPUFeatures::kPAuthEnhancedPAC);
    if (apx >= 0b0011) f.Combine(CPUFeatures::kPAuthEnhancedPAC2);
    if (apx >= 0b0100) f.Combine(CPUFeatures::kPAuthFPAC);
    if (apx >= 0b0101) f.Combine(CPUFeatures::kPAuthFPACCombined);
  }

  if (Get(kGPI) >= 1) f.Combine(CPUFeatures::kPAuthGeneric);
  if (Get(kGPA) >= 1) {
    f.Combine(CPUFeatures::kPAuthGeneric, CPUFeatures::kPAuthGenericQARMA);
  }
  return f;
}

CPUFeatures AA64MMFR1::GetCPUFeatures() const {
  CPUFeatures f;
  if (Get(kLO) >= 1) f.Combine(CPUFeatures::kLORegions);
  return f;
}

CPUFeatures AA64MMFR2::GetCPUFeatures() const {
  CPUFeatures f;
  if (Get(kAT) >= 1) f.Combine(CPUFeatures::kUSCAT);
  return f;
}

CPUFeatures AA64ZFR0::GetCPUFeatures() const {
  // This register is only available with SVE, but reads-as-zero in its
  // absence, so it's always safe to read it.
  CPUFeatures f;
  if (Get(kF64MM) >= 1) f.Combine(CPUFeatures::kSVEF64MM);
  if (Get(kF32MM) >= 1) f.Combine(CPUFeatures::kSVEF32MM);
  if (Get(kI8MM) >= 1) f.Combine(CPUFeatures::kSVEI8MM);
  if (Get(kBF16) >= 1) f.Combine(CPUFeatures::kSVEBF16);
  return f;
}

int IDRegister::Get(IDRegister::Field field) const {
  int msb = field.GetMsb();
  int lsb = field.GetLsb();
  VIXL_STATIC_ASSERT(static_cast<size_t>(Field::kMaxWidthInBits) <
                     (sizeof(int) * kBitsPerByte));
  switch (field.GetType()) {
    case Field::kSigned:
      return static_cast<int>(ExtractSignedBitfield64(msb, lsb, value_));
    case Field::kUnsigned:
      return static_cast<int>(ExtractUnsignedBitfield64(msb, lsb, value_));
  }
  VIXL_UNREACHABLE();
  return 0;
}

CPUFeatures CPU::InferCPUFeaturesFromIDRegisters() {
  CPUFeatures f;
#define VIXL_COMBINE_ID_REG(NAME, MRS_ARG) \
  f.Combine(Read##NAME().GetCPUFeatures());
  VIXL_AARCH64_ID_REG_LIST(VIXL_COMBINE_ID_REG)
#undef VIXL_COMBINE_ID_REG
  return f;
}

CPUFeatures CPU::InferCPUFeaturesFromOS(
    CPUFeatures::QueryIDRegistersOption option) {
  CPUFeatures features;

#if VIXL_USE_LINUX_HWCAP
  // Map each set bit onto a feature. Ideally, we'd use HWCAP_* macros rather
  // than explicit bits, but explicit bits allow us to identify features that
  // the toolchain doesn't know about.
  static const CPUFeatures::Feature kFeatureBits[] =
      {// Bits 0-7
       CPUFeatures::kFP,
       CPUFeatures::kNEON,
       CPUFeatures::kNone,  // "EVTSTRM", which VIXL doesn't track.
       CPUFeatures::kAES,
       CPUFeatures::kPmull1Q,
       CPUFeatures::kSHA1,
       CPUFeatures::kSHA2,
       CPUFeatures::kCRC32,
       // Bits 8-15
       CPUFeatures::kAtomics,
       CPUFeatures::kFPHalf,
       CPUFeatures::kNEONHalf,
       CPUFeatures::kIDRegisterEmulation,
       CPUFeatures::kRDM,
       CPUFeatures::kJSCVT,
       CPUFeatures::kFcma,
       CPUFeatures::kRCpc,
       // Bits 16-23
       CPUFeatures::kDCPoP,
       CPUFeatures::kSHA3,
       CPUFeatures::kSM3,
       CPUFeatures::kSM4,
       CPUFeatures::kDotProduct,
       CPUFeatures::kSHA512,
       CPUFeatures::kSVE,
       CPUFeatures::kFHM,
       // Bits 24-31
       CPUFeatures::kDIT,
       CPUFeatures::kUSCAT,
       CPUFeatures::kRCpcImm,
       CPUFeatures::kFlagM,
       CPUFeatures::kSSBSControl,
       CPUFeatures::kSB,
       CPUFeatures::kPAuth,
       CPUFeatures::kPAuthGeneric,
       // Bits 32-39
       CPUFeatures::kDCCVADP,
       CPUFeatures::kNone,  // "sve2"
       CPUFeatures::kNone,  // "sveaes"
       CPUFeatures::kNone,  // "svepmull"
       CPUFeatures::kNone,  // "svebitperm"
       CPUFeatures::kNone,  // "svesha3"
       CPUFeatures::kNone,  // "svesm4"
       CPUFeatures::kFrintToFixedSizedInt,
       // Bits 40-47
       CPUFeatures::kSVEI8MM,
       CPUFeatures::kSVEF32MM,
       CPUFeatures::kSVEF64MM,
       CPUFeatures::kSVEBF16,
       CPUFeatures::kI8MM,
       CPUFeatures::kBF16,
       CPUFeatures::kDGH,
       CPUFeatures::kRNG,
       // Bits 48+
       CPUFeatures::kBTI};

  uint64_t hwcap_low32 = getauxval(AT_HWCAP);
  uint64_t hwcap_high32 = getauxval(AT_HWCAP2);
  VIXL_ASSERT(IsUint32(hwcap_low32));
  VIXL_ASSERT(IsUint32(hwcap_high32));
  uint64_t hwcap = hwcap_low32 | (hwcap_high32 << 32);

  VIXL_STATIC_ASSERT(ArrayLength(kFeatureBits) < 64);
  for (size_t i = 0; i < ArrayLength(kFeatureBits); i++) {
    if (hwcap & (UINT64_C(1) << i)) features.Combine(kFeatureBits[i]);
  }
#endif  // VIXL_USE_LINUX_HWCAP

  if ((option == CPUFeatures::kQueryIDRegistersIfAvailable) &&
      (features.Has(CPUFeatures::kIDRegisterEmulation))) {
    features.Combine(InferCPUFeaturesFromIDRegisters());
  }
  return features;
}

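// Illustrative usage sketch (not part of the upstream VIXL source): a
// typical caller takes the OS view, which itself folds in the ID registers
// when the kernel emulates MRS accesses for user space:
//
//   CPUFeatures available = CPU::InferCPUFeaturesFromOS();
//   if (available.Has(CPUFeatures::kSVE)) {
//     int vl = CPU::ReadSVEVectorLengthInBits();
//     // Select SVE code paths for vector length `vl`.
//   }
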

#ifdef __aarch64__
#define VIXL_READ_ID_REG(NAME, MRS_ARG)        \
  NAME CPU::Read##NAME() {                     \
    uint64_t value = 0;                        \
    __asm__("mrs %0, " MRS_ARG : "=r"(value)); \
    return NAME(value);                        \
  }
#else  // __aarch64__
#define VIXL_READ_ID_REG(NAME, MRS_ARG) \
  NAME CPU::Read##NAME() {              \
    VIXL_UNREACHABLE();                 \
    return NAME(0);                     \
  }
#endif  // __aarch64__

VIXL_AARCH64_ID_REG_LIST(VIXL_READ_ID_REG)

#undef VIXL_READ_ID_REG


// Initialise to smallest possible cache size.
unsigned CPU::dcache_line_size_ = 1;
unsigned CPU::icache_line_size_ = 1;


// Currently computes I and D cache line size.
void CPU::SetUp() {
  uint32_t cache_type_register = GetCacheType();

  // The cache type register holds information about the caches, including
  // the I and D cache line sizes.
  static const int kDCacheLineSizeShift = 16;
  static const int kICacheLineSizeShift = 0;
  static const uint32_t kDCacheLineSizeMask = 0xf << kDCacheLineSizeShift;
  static const uint32_t kICacheLineSizeMask = 0xf << kICacheLineSizeShift;

  // The cache type register holds the I and D cache line sizes, in words,
  // as a power of two.
  uint32_t dcache_line_size_power_of_two =
      (cache_type_register & kDCacheLineSizeMask) >> kDCacheLineSizeShift;
  uint32_t icache_line_size_power_of_two =
      (cache_type_register & kICacheLineSizeMask) >> kICacheLineSizeShift;

  dcache_line_size_ = 4 << dcache_line_size_power_of_two;
  icache_line_size_ = 4 << icache_line_size_power_of_two;
}

uint32_t CPU::GetCacheType() {
#ifdef __aarch64__
  uint64_t cache_type_register;
  // Copy the content of the cache type register to a core register.
  __asm__ __volatile__("mrs %[ctr], ctr_el0"  // NOLINT(runtime/references)
                       : [ctr] "=r"(cache_type_register));
  VIXL_ASSERT(IsUint32(cache_type_register));
  return static_cast<uint32_t>(cache_type_register);
#else
  // This will lead to a cache with 1 byte long lines, which is fine since
  // neither EnsureIAndDCacheCoherency nor the simulator will need this
  // information.
  return 0;
#endif
}


// Query the SVE vector length. This requires CPUFeatures::kSVE.
int CPU::ReadSVEVectorLengthInBits() {
#ifdef __aarch64__
  uint64_t vl;
  // To support compilers that don't understand `rdvl`, encode the value
  // directly and move it manually.
  __asm__(
      " .word 0x04bf5100\n"  // rdvl x0, #8
      " mov %[vl], x0\n"
      : [vl] "=r"(vl)
      :
      : "x0");
  VIXL_ASSERT(vl <= INT_MAX);
  return static_cast<int>(vl);
#else
  VIXL_UNREACHABLE();
  return 0;
#endif
}

void CPU::EnsureIAndDCacheCoherency(void *address, size_t length) {
#ifdef __aarch64__
  // Implement the cache synchronisation for all targets where AArch64 is the
  // host, even if we're building the simulator for an AArch64 host. This
  // allows for cases where the user wants to simulate code as well as run it
  // natively.

  if (length == 0) {
    return;
  }

  // The code below assumes user space cache operations are allowed.

  // Work out the line sizes for each cache, and use them to determine the
  // start addresses.
  uintptr_t start = reinterpret_cast<uintptr_t>(address);
  uintptr_t dsize = static_cast<uintptr_t>(dcache_line_size_);
  uintptr_t isize = static_cast<uintptr_t>(icache_line_size_);
  uintptr_t dline = start & ~(dsize - 1);
  uintptr_t iline = start & ~(isize - 1);

  // Cache line sizes are always a power of 2.
  VIXL_ASSERT(IsPowerOf2(dsize));
  VIXL_ASSERT(IsPowerOf2(isize));
  uintptr_t end = start + length;

  do {
    __asm__ __volatile__(
        // Clean each line of the D cache containing the target data.
        //
        // dc : Data Cache maintenance
        //  c : Clean
        // va : by (Virtual) Address
        //  u : to the point of Unification
        // The point of unification for a processor is the point by which the
        // instruction and data caches are guaranteed to see the same copy of
        // a memory location. See ARM DDI 0406B page B2-12 for more
        // information.
        " dc cvau, %[dline]\n"
        :
        : [dline] "r"(dline)
        // This code does not write to memory, but the "memory" dependency
        // prevents GCC from reordering the code.
        : "memory");
    dline += dsize;
  } while (dline < end);

  __asm__ __volatile__(
      // Make sure that the data cache operations (above) complete before the
      // instruction cache operations (below).
      //
      // dsb : Data Synchronisation Barrier
      // ish : Inner SHareable domain
      //
      // The point of unification for an Inner Shareable shareability domain
      // is the point by which the instruction and data caches of all the
      // processors in that Inner Shareable shareability domain are
      // guaranteed to see the same copy of a memory location. See ARM DDI
      // 0406B page B2-12 for more information.
      " dsb ish\n"
      :
      :
      : "memory");

  do {
    __asm__ __volatile__(
        // Invalidate each line of the I cache containing the target data.
        //
        // ic : Instruction Cache maintenance
        //  i : Invalidate
        // va : by Address
        //  u : to the point of Unification
        " ic ivau, %[iline]\n"
        :
        : [iline] "r"(iline)
        : "memory");
    iline += isize;
  } while (iline < end);

  __asm__ __volatile__(
      // Make sure that the instruction cache operations (above) take effect
      // before the isb (below).
      " dsb ish\n"

      // Ensure that any instructions already in the pipeline are discarded
      // and reloaded from the new data.
      // isb : Instruction Synchronisation Barrier
      " isb\n"
      :
      :
      : "memory");
#else
  // If the host isn't AArch64, we must be using the simulator, so this
  // function doesn't have to do anything.
  USE(address, length);
#endif
}

|
||||
} // namespace aarch64
|
||||
} // namespace vixl
|
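// A minimal usage sketch (an illustration, not part of the original source):
// the typical flow after emitting code into a buffer. `buffer`,
// `generated_code` and `size` are hypothetical.
//
//   CPU::SetUp();  // Query the real cache line sizes once at start-up.
//   memcpy(buffer, generated_code, size);
//   CPU::EnsureIAndDCacheCoherency(buffer, size);
//   reinterpret_cast<void (*)()>(buffer)();  // Now safe to execute.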
282
externals/vixl/vixl/src/aarch64/cpu-aarch64.h
vendored
Normal file
@@ -0,0 +1,282 @@
// Copyright 2014, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_CPU_AARCH64_H
#define VIXL_CPU_AARCH64_H

#include "../cpu-features.h"
#include "../globals-vixl.h"

#include "instructions-aarch64.h"

#ifndef VIXL_INCLUDE_TARGET_AARCH64
// The supporting .cc file is only compiled when the A64 target is selected.
// Throw an explicit error now to avoid a harder-to-debug linker error later.
//
// These helpers _could_ work on any AArch64 host, even when generating AArch32
// code, but we don't support this because the available features may differ
// between AArch32 and AArch64 on the same platform, so basing AArch32 code
// generation on aarch64::CPU features is probably broken.
#error cpu-aarch64.h requires VIXL_INCLUDE_TARGET_AARCH64 (scons target=a64).
#endif

namespace vixl {
namespace aarch64 {

// A CPU ID register, for use with CPUFeatures::kIDRegisterEmulation. Fields
// specific to each register are described in relevant subclasses.
class IDRegister {
 protected:
  explicit IDRegister(uint64_t value = 0) : value_(value) {}

  class Field {
   public:
    enum Type { kUnsigned, kSigned };

    // This needs to be constexpr so that fields have "constant initialisation".
    // This avoids initialisation order problems when these values are used to
    // (dynamically) initialise static variables, etc.
    explicit constexpr Field(int lsb, Type type = kUnsigned)
        : lsb_(lsb), type_(type) {}

    static const int kMaxWidthInBits = 4;

    int GetWidthInBits() const {
      // All current ID fields have four bits.
      return kMaxWidthInBits;
    }
    int GetLsb() const { return lsb_; }
    int GetMsb() const { return lsb_ + GetWidthInBits() - 1; }
    Type GetType() const { return type_; }

   private:
    int lsb_;
    Type type_;
  };

 public:
  // Extract the specified field, performing sign-extension for signed fields.
  // This allows us to implement the 'value >= number' detection mechanism
  // recommended by the Arm ARM, for both signed and unsigned fields.
  int Get(Field field) const;

 private:
  uint64_t value_;
};
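// A worked example (an illustration, not part of the original source): for a
// signed four-bit field, the raw bits 0b1111 sign-extend to -1, which the Arm
// ARM uses to mean "not implemented". Get() therefore lets callers write
// uniform checks such as `Get(field) >= 1` for both signed and unsigned
// fields:
//
//   raw field bits 0b1111 (signed)   => Get() returns -1 => feature absent
//   raw field bits 0b0001 (signed)   => Get() returns  1 => feature present
//   raw field bits 0b1111 (unsigned) => Get() returns 15 => feature present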

class AA64PFR0 : public IDRegister {
 public:
  explicit AA64PFR0(uint64_t value) : IDRegister(value) {}

  CPUFeatures GetCPUFeatures() const;

 private:
  static const Field kFP;
  static const Field kAdvSIMD;
  static const Field kRAS;
  static const Field kSVE;
  static const Field kDIT;
  static const Field kCSV2;
  static const Field kCSV3;
};

class AA64PFR1 : public IDRegister {
 public:
  explicit AA64PFR1(uint64_t value) : IDRegister(value) {}

  CPUFeatures GetCPUFeatures() const;

 private:
  static const Field kBT;
  static const Field kSSBS;
  static const Field kMTE;
};

class AA64ISAR0 : public IDRegister {
 public:
  explicit AA64ISAR0(uint64_t value) : IDRegister(value) {}

  CPUFeatures GetCPUFeatures() const;

 private:
  static const Field kAES;
  static const Field kSHA1;
  static const Field kSHA2;
  static const Field kCRC32;
  static const Field kAtomic;
  static const Field kRDM;
  static const Field kSHA3;
  static const Field kSM3;
  static const Field kSM4;
  static const Field kDP;
  static const Field kFHM;
  static const Field kTS;
  static const Field kRNDR;
};

class AA64ISAR1 : public IDRegister {
 public:
  explicit AA64ISAR1(uint64_t value) : IDRegister(value) {}

  CPUFeatures GetCPUFeatures() const;

 private:
  static const Field kDPB;
  static const Field kAPA;
  static const Field kAPI;
  static const Field kJSCVT;
  static const Field kFCMA;
  static const Field kLRCPC;
  static const Field kGPA;
  static const Field kGPI;
  static const Field kFRINTTS;
  static const Field kSB;
  static const Field kSPECRES;
  static const Field kBF16;
  static const Field kDGH;
  static const Field kI8MM;
};

class AA64MMFR1 : public IDRegister {
 public:
  explicit AA64MMFR1(uint64_t value) : IDRegister(value) {}

  CPUFeatures GetCPUFeatures() const;

 private:
  static const Field kLO;
};

class AA64MMFR2 : public IDRegister {
 public:
  explicit AA64MMFR2(uint64_t value) : IDRegister(value) {}

  CPUFeatures GetCPUFeatures() const;

 private:
  static const Field kAT;
};

class AA64ZFR0 : public IDRegister {
 public:
  explicit AA64ZFR0(uint64_t value) : IDRegister(value) {}

  CPUFeatures GetCPUFeatures() const;

 private:
  static const Field kBF16;
  static const Field kI8MM;
  static const Field kF32MM;
  static const Field kF64MM;
};

class CPU {
 public:
  // Initialise CPU support.
  static void SetUp();

  // Ensures the data at a given address and with a given size is the same for
  // the I and D caches. I and D caches are not automatically coherent on ARM
  // so this operation is required before any dynamically generated code can
  // safely run.
  static void EnsureIAndDCacheCoherency(void *address, size_t length);

  // Read and interpret the ID registers. This requires
  // CPUFeatures::kIDRegisterEmulation, and therefore cannot be called on
  // non-AArch64 platforms.
  static CPUFeatures InferCPUFeaturesFromIDRegisters();

  // Read and interpret CPUFeatures reported by the OS. Failed queries (or
  // unsupported platforms) return an empty list. Note that this is
  // indistinguishable from a successful query on a platform that advertises no
  // features.
  //
  // Non-AArch64 hosts are considered to be unsupported platforms, and this
  // function returns an empty list.
  static CPUFeatures InferCPUFeaturesFromOS(
      CPUFeatures::QueryIDRegistersOption option =
          CPUFeatures::kQueryIDRegistersIfAvailable);

  // Query the SVE vector length. This requires CPUFeatures::kSVE.
  static int ReadSVEVectorLengthInBits();

  // Handle tagged pointers.
  template <typename T>
  static T SetPointerTag(T pointer, uint64_t tag) {
    VIXL_ASSERT(IsUintN(kAddressTagWidth, tag));

    // Use C-style casts to get static_cast behaviour for integral types (T),
    // and reinterpret_cast behaviour for other types.

    uint64_t raw = (uint64_t)pointer;
    VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(raw));

    raw = (raw & ~kAddressTagMask) | (tag << kAddressTagOffset);
    return (T)raw;
  }

  template <typename T>
  static uint64_t GetPointerTag(T pointer) {
    // Use C-style casts to get static_cast behaviour for integral types (T),
    // and reinterpret_cast behaviour for other types.

    uint64_t raw = (uint64_t)pointer;
    VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(raw));

    return (raw & kAddressTagMask) >> kAddressTagOffset;
  }

 private:
#define VIXL_AARCH64_ID_REG_LIST(V) \
  V(AA64PFR0, "ID_AA64PFR0_EL1")    \
  V(AA64PFR1, "ID_AA64PFR1_EL1")    \
  V(AA64ISAR0, "ID_AA64ISAR0_EL1")  \
  V(AA64ISAR1, "ID_AA64ISAR1_EL1")  \
  V(AA64MMFR1, "ID_AA64MMFR1_EL1")  \
  /* These registers are RES0 in the baseline Armv8.0. We can always safely */ \
  /* read them, but some compilers don't accept the symbolic names.        */ \
  V(AA64MMFR2, "S3_0_C0_C7_2") \
  V(AA64ZFR0, "S3_0_C0_C4_4")

#define VIXL_READ_ID_REG(NAME, MRS_ARG) static NAME Read##NAME();
  // On native AArch64 platforms, read the named CPU ID registers. These require
  // CPUFeatures::kIDRegisterEmulation, and should not be called on non-AArch64
  // platforms.
  VIXL_AARCH64_ID_REG_LIST(VIXL_READ_ID_REG)
#undef VIXL_READ_ID_REG

  // Return the content of the cache type register.
  static uint32_t GetCacheType();

  // I and D cache line size in bytes.
  static unsigned icache_line_size_;
  static unsigned dcache_line_size_;
};

}  // namespace aarch64
}  // namespace vixl

#endif  // VIXL_CPU_AARCH64_H
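// A minimal usage sketch (an illustration, not part of the original source):
// stashing a small tag in the top bits of a pointer and reading it back,
// assuming an 8-bit address tag (top-byte-ignore) on the target.
//
//   int data = 42;
//   int* tagged = CPU::SetPointerTag(&data, 0x5a);
//   uint64_t tag = CPU::GetPointerTag(tagged);  // 0x5a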
1357
externals/vixl/vixl/src/aarch64/cpu-features-auditor-aarch64.cc
vendored
Normal file
File diff suppressed because it is too large
125
externals/vixl/vixl/src/aarch64/cpu-features-auditor-aarch64.h
vendored
Normal file
@@ -0,0 +1,125 @@
// Copyright 2018, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of Arm Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_AARCH64_CPU_FEATURES_AUDITOR_AARCH64_H_
#define VIXL_AARCH64_CPU_FEATURES_AUDITOR_AARCH64_H_

#include <iostream>

#include "cpu-features.h"
#include "decoder-aarch64.h"

namespace vixl {
namespace aarch64 {

// This visitor records the CPU features that each decoded instruction requires.
// It provides:
//  - the set of CPU features required by the most recently decoded instruction,
//  - a cumulative set of encountered CPU features,
//  - an optional list of 'available' CPU features.
//
// Primarily, this allows the Disassembler and Simulator to share the same CPU
// features logic. However, it can be used standalone to scan code blocks for
// CPU features.
class CPUFeaturesAuditor : public DecoderVisitor {
 public:
  // Construction arguments:
  //   - If a decoder is specified, the CPUFeaturesAuditor automatically
  //     registers itself as a visitor. Otherwise, this can be done manually.
  //
  //   - If an `available` features list is provided, it is used as a hint in
  //     cases where instructions may be provided by multiple separate features.
  //     An example of this is FP&SIMD loads and stores: some of these are used
  //     in both FP and integer SIMD code. If exactly one of those features is
  //     in `available` when one of these instructions is encountered, then the
  //     auditor will record that feature. Otherwise, it will record _both_
  //     features.
  explicit CPUFeaturesAuditor(
      Decoder* decoder, const CPUFeatures& available = CPUFeatures::None())
      : available_(available), decoder_(decoder) {
    if (decoder_ != NULL) decoder_->AppendVisitor(this);
  }

  explicit CPUFeaturesAuditor(
      const CPUFeatures& available = CPUFeatures::None())
      : available_(available), decoder_(NULL) {}

  virtual ~CPUFeaturesAuditor() {
    if (decoder_ != NULL) decoder_->RemoveVisitor(this);
  }

  void ResetSeenFeatures() {
    seen_ = CPUFeatures::None();
    last_instruction_ = CPUFeatures::None();
  }

  // Query or set available CPUFeatures.
  const CPUFeatures& GetAvailableFeatures() const { return available_; }
  void SetAvailableFeatures(const CPUFeatures& available) {
    available_ = available;
  }

  // Query CPUFeatures seen since construction (or the last call to
  // `ResetSeenFeatures()`).
  const CPUFeatures& GetSeenFeatures() const { return seen_; }

  // Query CPUFeatures from the last instruction visited by this auditor.
  const CPUFeatures& GetInstructionFeatures() const {
    return last_instruction_;
  }

  bool InstructionIsAvailable() const {
    return available_.Has(last_instruction_);
  }

  // The common CPUFeatures interface operates on the available_ list.
  CPUFeatures* GetCPUFeatures() { return &available_; }
  void SetCPUFeatures(const CPUFeatures& available) {
    SetAvailableFeatures(available);
  }

  // Declare all Visitor functions.
#define DECLARE(A) \
  virtual void Visit##A(const Instruction* instr) VIXL_OVERRIDE;
  VISITOR_LIST(DECLARE)
#undef DECLARE

 private:
  class RecordInstructionFeaturesScope;

  void LoadStoreHelper(const Instruction* instr);
  void LoadStorePairHelper(const Instruction* instr);

  CPUFeatures seen_;
  CPUFeatures last_instruction_;
  CPUFeatures available_;

  Decoder* decoder_;
};

}  // namespace aarch64
}  // namespace vixl

#endif  // VIXL_AARCH64_CPU_FEATURES_AUDITOR_AARCH64_H_
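// A minimal usage sketch (an illustration, not part of the original source):
// scanning a block of generated code for the CPU features it requires.
// `code_start` and `code_end` are hypothetical Instruction pointers.
//
//   Decoder decoder;
//   CPUFeaturesAuditor auditor(&decoder);  // Registers itself as a visitor.
//   decoder.Decode(code_start, code_end);  // Visit every instruction.
//   CPUFeatures required = auditor.GetSeenFeatures();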
521
externals/vixl/vixl/src/aarch64/decoder-aarch64.cc
vendored
Normal file
@@ -0,0 +1,521 @@
// Copyright 2019, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include <string>

#include "../globals-vixl.h"
#include "../utils-vixl.h"

#include "decoder-aarch64.h"
#include "decoder-constants-aarch64.h"

namespace vixl {
namespace aarch64 {

void Decoder::Decode(const Instruction* instr) {
  std::list<DecoderVisitor*>::iterator it;
  for (it = visitors_.begin(); it != visitors_.end(); it++) {
    VIXL_ASSERT((*it)->IsConstVisitor());
  }
  VIXL_ASSERT(compiled_decoder_root_ != NULL);
  compiled_decoder_root_->Decode(instr);
}

void Decoder::Decode(Instruction* instr) {
  compiled_decoder_root_->Decode(const_cast<const Instruction*>(instr));
}

void Decoder::AddDecodeNode(const DecodeNode& node) {
  decode_nodes_.insert(std::make_pair(node.GetName(), node));
}

DecodeNode* Decoder::GetDecodeNode(std::string name) {
  if (decode_nodes_.count(name) != 1) {
    std::string msg = "Can't find decode node " + name + ".\n";
    VIXL_ABORT_WITH_MSG(msg.c_str());
  }
  return &decode_nodes_[name];
}

void Decoder::ConstructDecodeGraph() {
  // Add all of the decoding nodes to the Decoder.
  for (unsigned i = 0; i < ArrayLength(kDecodeMapping); i++) {
    AddDecodeNode(DecodeNode(kDecodeMapping[i], this));
  }

  // Add the visitor function wrapping nodes to the Decoder.
  for (unsigned i = 0; i < ArrayLength(kVisitorNodes); i++) {
    AddDecodeNode(DecodeNode(kVisitorNodes[i], this));
  }

  // Compile the graph from the root.
  compiled_decoder_root_ = GetDecodeNode("Root")->Compile(this);
}

void Decoder::AppendVisitor(DecoderVisitor* new_visitor) {
  visitors_.push_back(new_visitor);
}


void Decoder::PrependVisitor(DecoderVisitor* new_visitor) {
  visitors_.push_front(new_visitor);
}


void Decoder::InsertVisitorBefore(DecoderVisitor* new_visitor,
                                  DecoderVisitor* registered_visitor) {
  std::list<DecoderVisitor*>::iterator it;
  for (it = visitors_.begin(); it != visitors_.end(); it++) {
    if (*it == registered_visitor) {
      visitors_.insert(it, new_visitor);
      return;
    }
  }
  // We reached the end of the list. The last element must be
  // registered_visitor.
  VIXL_ASSERT(*it == registered_visitor);
  visitors_.insert(it, new_visitor);
}


void Decoder::InsertVisitorAfter(DecoderVisitor* new_visitor,
                                 DecoderVisitor* registered_visitor) {
  std::list<DecoderVisitor*>::iterator it;
  for (it = visitors_.begin(); it != visitors_.end(); it++) {
    if (*it == registered_visitor) {
      it++;
      visitors_.insert(it, new_visitor);
      return;
    }
  }
  // We reached the end of the list. The last element must be
  // registered_visitor.
  VIXL_ASSERT(*it == registered_visitor);
  visitors_.push_back(new_visitor);
}


void Decoder::RemoveVisitor(DecoderVisitor* visitor) {
  visitors_.remove(visitor);
}

#define DEFINE_VISITOR_CALLERS(A)                               \
  void Decoder::Visit##A(const Instruction* instr) {            \
    VIXL_ASSERT(((A##FMask == 0) && (A##Fixed == 0)) ||         \
                (instr->Mask(A##FMask) == A##Fixed));           \
    std::list<DecoderVisitor*>::iterator it;                    \
    for (it = visitors_.begin(); it != visitors_.end(); it++) { \
      (*it)->Visit##A(instr);                                   \
    }                                                           \
  }
VISITOR_LIST(DEFINE_VISITOR_CALLERS)
#undef DEFINE_VISITOR_CALLERS

void DecodeNode::SetSampledBits(const uint8_t* bits, int bit_count) {
  VIXL_ASSERT(!IsCompiled());

  sampled_bits_.resize(bit_count);
  for (int i = 0; i < bit_count; i++) {
    sampled_bits_[i] = bits[i];
  }
}

std::vector<uint8_t> DecodeNode::GetSampledBits() const {
  return sampled_bits_;
}

size_t DecodeNode::GetSampledBitsCount() const { return sampled_bits_.size(); }

void DecodeNode::AddPatterns(const DecodePattern* patterns) {
  VIXL_ASSERT(!IsCompiled());
  for (unsigned i = 0; i < kMaxDecodeMappings; i++) {
    // A NULL pattern marks the end of the pattern list.
    if (patterns[i].pattern == NULL) break;
    VIXL_ASSERT((strlen(patterns[i].pattern) == GetSampledBitsCount()) ||
                (strcmp(patterns[i].pattern, "otherwise") == 0));
    pattern_table_.push_back(patterns[i]);
  }
}

void DecodeNode::CompileNodeForBits(Decoder* decoder,
                                    std::string name,
                                    uint32_t bits) {
  DecodeNode* n = decoder->GetDecodeNode(name);
  VIXL_ASSERT(n != NULL);
  if (!n->IsCompiled()) {
    n->Compile(decoder);
  }
  VIXL_ASSERT(n->IsCompiled());
  compiled_node_->SetNodeForBits(bits, n->GetCompiledNode());
}

BitExtractFn DecodeNode::GetBitExtractFunction(uint32_t mask) {
  // Instantiate a templated bit extraction function for every pattern we
  // might encounter. If the assertion in the default clause is reached, add a
  // new instantiation below using the information in the failure message.
  BitExtractFn bit_extract_fn = NULL;
  switch (mask) {
#define INSTANTIATE_TEMPLATE(M)                    \
  case M:                                          \
    bit_extract_fn = &Instruction::ExtractBits<M>; \
    break;
    INSTANTIATE_TEMPLATE(0x000001e0);
    INSTANTIATE_TEMPLATE(0x00000400);
    INSTANTIATE_TEMPLATE(0x00000800);
    INSTANTIATE_TEMPLATE(0x00000c00);
    INSTANTIATE_TEMPLATE(0x00001000);
    INSTANTIATE_TEMPLATE(0x00001800);
    INSTANTIATE_TEMPLATE(0x00001c00);
    INSTANTIATE_TEMPLATE(0x00004000);
    INSTANTIATE_TEMPLATE(0x00008000);
    INSTANTIATE_TEMPLATE(0x0000f000);
    INSTANTIATE_TEMPLATE(0x0000fc00);
    INSTANTIATE_TEMPLATE(0x00060010);
    INSTANTIATE_TEMPLATE(0x00093e00);
    INSTANTIATE_TEMPLATE(0x000c1000);
    INSTANTIATE_TEMPLATE(0x00100000);
    INSTANTIATE_TEMPLATE(0x00101800);
    INSTANTIATE_TEMPLATE(0x00140000);
    INSTANTIATE_TEMPLATE(0x00180000);
    INSTANTIATE_TEMPLATE(0x00181000);
    INSTANTIATE_TEMPLATE(0x00190000);
    INSTANTIATE_TEMPLATE(0x00191400);
    INSTANTIATE_TEMPLATE(0x001c0000);
    INSTANTIATE_TEMPLATE(0x001c1800);
    INSTANTIATE_TEMPLATE(0x001f0000);
    INSTANTIATE_TEMPLATE(0x0020fc00);
    INSTANTIATE_TEMPLATE(0x0038f000);
    INSTANTIATE_TEMPLATE(0x00400000);
    INSTANTIATE_TEMPLATE(0x00400010);
    INSTANTIATE_TEMPLATE(0x0040f000);
    INSTANTIATE_TEMPLATE(0x00500000);
    INSTANTIATE_TEMPLATE(0x00800000);
    INSTANTIATE_TEMPLATE(0x00800010);
    INSTANTIATE_TEMPLATE(0x00801800);
    INSTANTIATE_TEMPLATE(0x009f0000);
    INSTANTIATE_TEMPLATE(0x00c00000);
    INSTANTIATE_TEMPLATE(0x00c00010);
    INSTANTIATE_TEMPLATE(0x00cf8000);
    INSTANTIATE_TEMPLATE(0x00db0000);
    INSTANTIATE_TEMPLATE(0x00dc0000);
    INSTANTIATE_TEMPLATE(0x00e00003);
    INSTANTIATE_TEMPLATE(0x00f80400);
    INSTANTIATE_TEMPLATE(0x01e00000);
    INSTANTIATE_TEMPLATE(0x03800000);
    INSTANTIATE_TEMPLATE(0x04c0f000);
    INSTANTIATE_TEMPLATE(0x10800400);
    INSTANTIATE_TEMPLATE(0x1e000000);
    INSTANTIATE_TEMPLATE(0x20000000);
    INSTANTIATE_TEMPLATE(0x20000410);
    INSTANTIATE_TEMPLATE(0x20007000);
    INSTANTIATE_TEMPLATE(0x20007800);
    INSTANTIATE_TEMPLATE(0x2000f000);
    INSTANTIATE_TEMPLATE(0x2000f800);
    INSTANTIATE_TEMPLATE(0x201e0c00);
    INSTANTIATE_TEMPLATE(0x20803800);
    INSTANTIATE_TEMPLATE(0x20c0cc00);
    INSTANTIATE_TEMPLATE(0x20c0f000);
    INSTANTIATE_TEMPLATE(0x20c0f800);
    INSTANTIATE_TEMPLATE(0x20c1f000);
    INSTANTIATE_TEMPLATE(0x51e00000);
    INSTANTIATE_TEMPLATE(0x60007800);
    INSTANTIATE_TEMPLATE(0x6000f800);
    INSTANTIATE_TEMPLATE(0x601e0000);
    INSTANTIATE_TEMPLATE(0x80007c00);
    INSTANTIATE_TEMPLATE(0x80017c00);
    INSTANTIATE_TEMPLATE(0x80408000);
    INSTANTIATE_TEMPLATE(0x80a07c00);
    INSTANTIATE_TEMPLATE(0x80df0000);
    INSTANTIATE_TEMPLATE(0x80e08000);
    INSTANTIATE_TEMPLATE(0xa0c00000);
    INSTANTIATE_TEMPLATE(0xb5a00000);
    INSTANTIATE_TEMPLATE(0xc0c00c00);
    INSTANTIATE_TEMPLATE(0xc4400000);
    INSTANTIATE_TEMPLATE(0xc4c00000);
    INSTANTIATE_TEMPLATE(0xe0400000);
    INSTANTIATE_TEMPLATE(0xe120e000);
    INSTANTIATE_TEMPLATE(0xe3c00000);
    INSTANTIATE_TEMPLATE(0xf1200000);
#undef INSTANTIATE_TEMPLATE
    default:
      printf("Node %s: No template instantiated for extracting 0x%08x.\n",
             GetName().c_str(),
             GenerateSampledBitsMask());
      printf("Add one in %s above line %d:\n", __FILE__, __LINE__);
      printf("  INSTANTIATE_TEMPLATE(0x%08x);\n", GenerateSampledBitsMask());
      VIXL_UNREACHABLE();
  }
  return bit_extract_fn;
}

BitExtractFn DecodeNode::GetBitExtractFunction(uint32_t mask, uint32_t value) {
  // Instantiate a templated bit extraction function for every pattern we
  // might encounter. If the assertion in the following check fails, add a
  // new instantiation below using the information in the failure message.
  bool instantiated = false;
  BitExtractFn bit_extract_fn = NULL;
#define INSTANTIATE_TEMPLATE(M, V)                      \
  if ((mask == M) && (value == V)) {                    \
    bit_extract_fn = &Instruction::IsMaskedValue<M, V>; \
    instantiated = true;                                \
  }
  INSTANTIATE_TEMPLATE(0x0000001c, 0x00000000);
  INSTANTIATE_TEMPLATE(0x00000210, 0x00000000);
  INSTANTIATE_TEMPLATE(0x000003c0, 0x00000000);
  INSTANTIATE_TEMPLATE(0x00001c00, 0x00000000);
  INSTANTIATE_TEMPLATE(0x00001c0f, 0x00000000);
  INSTANTIATE_TEMPLATE(0x00003000, 0x00000000);
  INSTANTIATE_TEMPLATE(0x00007800, 0x00000000);
  INSTANTIATE_TEMPLATE(0x0000e000, 0x0000a000);
  INSTANTIATE_TEMPLATE(0x0000f000, 0x00000000);
  INSTANTIATE_TEMPLATE(0x00030400, 0x00000000);
  INSTANTIATE_TEMPLATE(0x0003801f, 0x0000000d);
  INSTANTIATE_TEMPLATE(0x00060210, 0x00000000);
  INSTANTIATE_TEMPLATE(0x00060810, 0x00000000);
  INSTANTIATE_TEMPLATE(0x00060a10, 0x00000000);
  INSTANTIATE_TEMPLATE(0x00060bf0, 0x00000000);
  INSTANTIATE_TEMPLATE(0x00061e10, 0x00000000);
  INSTANTIATE_TEMPLATE(0x00061e10, 0x00000400);
  INSTANTIATE_TEMPLATE(0x00070200, 0x00000000);
  INSTANTIATE_TEMPLATE(0x000b1e10, 0x00000000);
  INSTANTIATE_TEMPLATE(0x000f0000, 0x00000000);
  INSTANTIATE_TEMPLATE(0x00130e1f, 0x00000000);
  INSTANTIATE_TEMPLATE(0x00130fff, 0x00000000);
  INSTANTIATE_TEMPLATE(0x00180000, 0x00000000);
  INSTANTIATE_TEMPLATE(0x00180000, 0x00100000);
  INSTANTIATE_TEMPLATE(0x001e0000, 0x00000000);
  INSTANTIATE_TEMPLATE(0x001f0000, 0x00000000);
  INSTANTIATE_TEMPLATE(0x001f0000, 0x001f0000);
  INSTANTIATE_TEMPLATE(0x0038e000, 0x00000000);
  INSTANTIATE_TEMPLATE(0x0039e000, 0x00002000);
  INSTANTIATE_TEMPLATE(0x003ae000, 0x00002000);
  INSTANTIATE_TEMPLATE(0x003ce000, 0x00042000);
  INSTANTIATE_TEMPLATE(0x005f0000, 0x001f0000);
  INSTANTIATE_TEMPLATE(0x00780000, 0x00000000);
  INSTANTIATE_TEMPLATE(0x00870210, 0x00000000);
  INSTANTIATE_TEMPLATE(0x00c00000, 0x00000000);
  INSTANTIATE_TEMPLATE(0x00c00000, 0x00800000);
  INSTANTIATE_TEMPLATE(0x00c00000, 0x00c00000);
  INSTANTIATE_TEMPLATE(0x00c00010, 0x00800000);
  INSTANTIATE_TEMPLATE(0x00ca1e10, 0x00000000);
  INSTANTIATE_TEMPLATE(0x01000010, 0x00000000);
  INSTANTIATE_TEMPLATE(0x20000800, 0x00000000);
  INSTANTIATE_TEMPLATE(0x20008000, 0x00000000);
  INSTANTIATE_TEMPLATE(0x20040000, 0x00000000);
  INSTANTIATE_TEMPLATE(0x201e8000, 0x00000000);
  INSTANTIATE_TEMPLATE(0x60000000, 0x00000000);
  INSTANTIATE_TEMPLATE(0x60000000, 0x20000000);
  INSTANTIATE_TEMPLATE(0x60000000, 0x60000000);
  INSTANTIATE_TEMPLATE(0x60200000, 0x00000000);
  INSTANTIATE_TEMPLATE(0x80008000, 0x00000000);
  INSTANTIATE_TEMPLATE(0x80008000, 0x00008000);
  INSTANTIATE_TEMPLATE(0x80400000, 0x00400000);
  INSTANTIATE_TEMPLATE(0xa00003e0, 0x00000000);
  INSTANTIATE_TEMPLATE(0xa000c007, 0x00000000);
  INSTANTIATE_TEMPLATE(0xa0100000, 0x00000000);
  INSTANTIATE_TEMPLATE(0xc4000000, 0xc0000000);
  INSTANTIATE_TEMPLATE(0xc4000000, 0xc4000000);
  INSTANTIATE_TEMPLATE(0xe0000010, 0xa0000000);
  INSTANTIATE_TEMPLATE(0xe01c0000, 0x20000000);
  INSTANTIATE_TEMPLATE(0xe1ff0000, 0x00000000);
#undef INSTANTIATE_TEMPLATE

  if (!instantiated) {
    printf(
        "Node %s: no template instantiated for mask 0x%08x, value = "
        "0x%08x.\n",
        GetName().c_str(),
        mask,
        value);
    printf("Add one in %s above line %d:\n", __FILE__, __LINE__);
    printf("  INSTANTIATE_TEMPLATE(0x%08x, 0x%08x);\n", mask, value);
    VIXL_UNREACHABLE();
  }
  return bit_extract_fn;
}

bool DecodeNode::TryCompileOptimisedDecodeTable(Decoder* decoder) {
  // EitherOr optimisation: if there are only one or two patterns in the table,
  // try to optimise the node to exploit that.
  size_t table_size = pattern_table_.size();
  if ((table_size <= 2) && (GetSampledBitsCount() > 1)) {
    // TODO: support 'x' in this optimisation by dropping the sampled bit
    // positions before making the mask/value.
    if ((strchr(pattern_table_[0].pattern, 'x') == NULL) &&
        ((table_size == 1) ||
         (strcmp(pattern_table_[1].pattern, "otherwise") == 0))) {
      // A pattern table consisting of a fixed pattern with no x's, and an
      // "otherwise" or absent case. Optimise this into an instruction mask and
      // value test.
      uint32_t single_decode_mask = 0;
      uint32_t single_decode_value = 0;
      std::vector<uint8_t> bits = GetSampledBits();

      // Construct the instruction mask and value from the pattern.
      VIXL_ASSERT(bits.size() == strlen(pattern_table_[0].pattern));
      for (size_t i = 0; i < bits.size(); i++) {
        single_decode_mask |= 1U << bits[i];
        if (pattern_table_[0].pattern[i] == '1') {
          single_decode_value |= 1U << bits[i];
        }
      }
      BitExtractFn bit_extract_fn =
          GetBitExtractFunction(single_decode_mask, single_decode_value);

      // Create a compiled node that contains a two entry table for the
      // either/or cases.
      CreateCompiledNode(bit_extract_fn, 2);

      // Set DecodeNode for when the instruction after masking doesn't match the
      // value.
      const char* doesnt_match_handler =
          (table_size == 1) ? "VisitUnallocated" : pattern_table_[1].handler;
      CompileNodeForBits(decoder, doesnt_match_handler, 0);

      // Set DecodeNode for when it does match.
      CompileNodeForBits(decoder, pattern_table_[0].handler, 1);

      return true;
    }
  }
  return false;
}

CompiledDecodeNode* DecodeNode::Compile(Decoder* decoder) {
  if (IsLeafNode()) {
    // A leaf node is a simple wrapper around a visitor function, with no
    // instruction decoding to do.
    CreateVisitorNode();
  } else if (!TryCompileOptimisedDecodeTable(decoder)) {
    // The "otherwise" node is the default next node if no pattern matches.
    std::string otherwise = "VisitUnallocated";

    // For each pattern in pattern_table_, create an entry in matches that
    // has a corresponding mask and value for the pattern.
    std::vector<MaskValuePair> matches;
    for (size_t i = 0; i < pattern_table_.size(); i++) {
      if (strcmp(pattern_table_[i].pattern, "otherwise") == 0) {
        // "otherwise" must be the last pattern in the list, otherwise the
        // indices won't match for pattern_table_ and matches.
        VIXL_ASSERT(i == pattern_table_.size() - 1);
        otherwise = pattern_table_[i].handler;
      } else {
        matches.push_back(GenerateMaskValuePair(
            GenerateOrderedPattern(pattern_table_[i].pattern)));
      }
    }

    BitExtractFn bit_extract_fn =
        GetBitExtractFunction(GenerateSampledBitsMask());

    // Create a compiled node that contains a table with an entry for every bit
    // pattern.
    CreateCompiledNode(bit_extract_fn, 1U << GetSampledBitsCount());
    VIXL_ASSERT(compiled_node_ != NULL);

    // When we find a pattern that matches the representation, set the node's
    // decode function for that representation to the corresponding function.
    for (uint32_t bits = 0; bits < (1U << GetSampledBitsCount()); bits++) {
      for (size_t i = 0; i < matches.size(); i++) {
        if ((bits & matches[i].first) == matches[i].second) {
          // Only one instruction class should match for each value of bits, so
          // if we get here, the node pointed to should still be unallocated.
          VIXL_ASSERT(compiled_node_->GetNodeForBits(bits) == NULL);
          CompileNodeForBits(decoder, pattern_table_[i].handler, bits);
          break;
        }
      }

      // If the decode_table_ entry for these bits is still NULL, the
      // instruction must be handled by the "otherwise" case, which by default
      // is the Unallocated visitor.
      if (compiled_node_->GetNodeForBits(bits) == NULL) {
        CompileNodeForBits(decoder, otherwise, bits);
      }
    }
  }

  VIXL_ASSERT(compiled_node_ != NULL);
  return compiled_node_;
}

void CompiledDecodeNode::Decode(const Instruction* instr) const {
  if (IsLeafNode()) {
    // If this node is a leaf, call the registered visitor function.
    VIXL_ASSERT(decoder_ != NULL);
    (decoder_->*visitor_fn_)(instr);
  } else {
    // Otherwise, using the sampled bit extractor for this node, look up the
    // next node in the decode tree, and call its Decode method.
    VIXL_ASSERT(bit_extract_fn_ != NULL);
    VIXL_ASSERT((instr->*bit_extract_fn_)() < decode_table_size_);
    VIXL_ASSERT(decode_table_[(instr->*bit_extract_fn_)()] != NULL);
    decode_table_[(instr->*bit_extract_fn_)()]->Decode(instr);
  }
}

DecodeNode::MaskValuePair DecodeNode::GenerateMaskValuePair(
    std::string pattern) const {
  uint32_t mask = 0, value = 0;
  for (size_t i = 0; i < pattern.size(); i++) {
    mask |= ((pattern[i] == 'x') ? 0 : 1) << i;
    value |= ((pattern[i] == '1') ? 1 : 0) << i;
  }
  return std::make_pair(mask, value);
}

std::string DecodeNode::GenerateOrderedPattern(std::string pattern) const {
  std::vector<uint8_t> sampled_bits = GetSampledBits();
  // Construct a temporary 32-character string containing '_', then at each
  // sampled bit position, set the corresponding pattern character.
  std::string temp(32, '_');
  for (size_t i = 0; i < sampled_bits.size(); i++) {
    temp[sampled_bits[i]] = pattern[i];
  }

  // Iterate through the temporary string, filtering out the non-'_' characters
  // into a new ordered pattern result string.
  std::string result;
  for (size_t i = 0; i < temp.size(); i++) {
    if (temp[i] != '_') {
      result.push_back(temp[i]);
    }
  }
  VIXL_ASSERT(result.size() == sampled_bits.size());
  return result;
}

uint32_t DecodeNode::GenerateSampledBitsMask() const {
  std::vector<uint8_t> sampled_bits = GetSampledBits();
  uint32_t mask = 0;
  for (size_t i = 0; i < sampled_bits.size(); i++) {
    mask |= 1 << sampled_bits[i];
  }
  return mask;
}

}  // namespace aarch64
}  // namespace vixl
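// A worked example (an illustration, not part of the original source):
// GenerateMaskValuePair treats the ordered pattern string LSB-first, so for
// the pattern "10x" (position 0 = '1', position 1 = '0', position 2 = 'x'):
//
//   mask  = 0b011  ('x' positions contribute 0 to the mask)
//   value = 0b001  (only '1' positions are set)
//
// The sampled bits then match this pattern exactly when
// (bits & 0b011) == 0b001.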
680
externals/vixl/vixl/src/aarch64/decoder-aarch64.h
vendored
Normal file
@@ -0,0 +1,680 @@
// Copyright 2019, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_AARCH64_DECODER_AARCH64_H_
#define VIXL_AARCH64_DECODER_AARCH64_H_

#include <list>
#include <map>
#include <string>

#include "../globals-vixl.h"

#include "instructions-aarch64.h"


// List macro containing all visitors needed by the decoder class.

#define VISITOR_LIST_THAT_RETURN(V) \
  V(AddSubExtended) \
  V(AddSubImmediate) \
  V(AddSubShifted) \
  V(AddSubWithCarry) \
  V(AtomicMemory) \
  V(Bitfield) \
  V(CompareBranch) \
  V(ConditionalBranch) \
  V(ConditionalCompareImmediate) \
  V(ConditionalCompareRegister) \
  V(ConditionalSelect) \
  V(Crypto2RegSHA) \
  V(Crypto3RegSHA) \
  V(CryptoAES) \
  V(DataProcessing1Source) \
  V(DataProcessing2Source) \
  V(DataProcessing3Source) \
  V(EvaluateIntoFlags) \
  V(Exception) \
  V(Extract) \
  V(FPCompare) \
  V(FPConditionalCompare) \
  V(FPConditionalSelect) \
  V(FPDataProcessing1Source) \
  V(FPDataProcessing2Source) \
  V(FPDataProcessing3Source) \
  V(FPFixedPointConvert) \
  V(FPImmediate) \
  V(FPIntegerConvert) \
  V(LoadLiteral) \
  V(LoadStoreExclusive) \
  V(LoadStorePAC) \
  V(LoadStorePairNonTemporal) \
  V(LoadStorePairOffset) \
  V(LoadStorePairPostIndex) \
  V(LoadStorePairPreIndex) \
  V(LoadStorePostIndex) \
  V(LoadStorePreIndex) \
  V(LoadStoreRCpcUnscaledOffset) \
  V(LoadStoreRegisterOffset) \
  V(LoadStoreUnscaledOffset) \
  V(LoadStoreUnsignedOffset) \
  V(LogicalImmediate) \
  V(LogicalShifted) \
  V(MoveWideImmediate) \
  V(NEON2RegMisc) \
  V(NEON2RegMiscFP16) \
  V(NEON3Different) \
  V(NEON3Same) \
  V(NEON3SameExtra) \
  V(NEON3SameFP16) \
  V(NEONAcrossLanes) \
  V(NEONByIndexedElement) \
  V(NEONCopy) \
  V(NEONExtract) \
  V(NEONLoadStoreMultiStruct) \
  V(NEONLoadStoreMultiStructPostIndex) \
  V(NEONLoadStoreSingleStruct) \
  V(NEONLoadStoreSingleStructPostIndex) \
  V(NEONModifiedImmediate) \
  V(NEONPerm) \
  V(NEONScalar2RegMisc) \
  V(NEONScalar2RegMiscFP16) \
  V(NEONScalar3Diff) \
  V(NEONScalar3Same) \
  V(NEONScalar3SameExtra) \
  V(NEONScalar3SameFP16) \
  V(NEONScalarByIndexedElement) \
  V(NEONScalarCopy) \
  V(NEONScalarPairwise) \
  V(NEONScalarShiftImmediate) \
  V(NEONShiftImmediate) \
  V(NEONTable) \
  V(PCRelAddressing) \
  V(RotateRightIntoFlags) \
  V(SVE32BitGatherLoad_ScalarPlus32BitUnscaledOffsets) \
  V(SVE32BitGatherLoad_VectorPlusImm) \
  V(SVE32BitGatherLoadHalfwords_ScalarPlus32BitScaledOffsets) \
  V(SVE32BitGatherLoadWords_ScalarPlus32BitScaledOffsets) \
  V(SVE32BitGatherPrefetch_ScalarPlus32BitScaledOffsets) \
  V(SVE32BitGatherPrefetch_VectorPlusImm) \
  V(SVE32BitScatterStore_ScalarPlus32BitScaledOffsets) \
  V(SVE32BitScatterStore_ScalarPlus32BitUnscaledOffsets) \
  V(SVE32BitScatterStore_VectorPlusImm) \
  V(SVE64BitGatherLoad_ScalarPlus32BitUnpackedScaledOffsets) \
  V(SVE64BitGatherLoad_ScalarPlus64BitScaledOffsets) \
  V(SVE64BitGatherLoad_ScalarPlus64BitUnscaledOffsets) \
  V(SVE64BitGatherLoad_ScalarPlusUnpacked32BitUnscaledOffsets) \
  V(SVE64BitGatherLoad_VectorPlusImm) \
  V(SVE64BitGatherPrefetch_ScalarPlus64BitScaledOffsets) \
  V(SVE64BitGatherPrefetch_ScalarPlusUnpacked32BitScaledOffsets) \
  V(SVE64BitGatherPrefetch_VectorPlusImm) \
  V(SVE64BitScatterStore_ScalarPlus64BitScaledOffsets) \
  V(SVE64BitScatterStore_ScalarPlus64BitUnscaledOffsets) \
  V(SVE64BitScatterStore_ScalarPlusUnpacked32BitScaledOffsets) \
  V(SVE64BitScatterStore_ScalarPlusUnpacked32BitUnscaledOffsets) \
  V(SVE64BitScatterStore_VectorPlusImm) \
  V(SVEAddressGeneration) \
  V(SVEBitwiseLogicalUnpredicated) \
  V(SVEBitwiseShiftUnpredicated) \
  V(SVEFFRInitialise) \
  V(SVEFFRWriteFromPredicate) \
  V(SVEFPAccumulatingReduction) \
  V(SVEFPArithmeticUnpredicated) \
  V(SVEFPCompareVectors) \
  V(SVEFPCompareWithZero) \
  V(SVEFPComplexAddition) \
  V(SVEFPComplexMulAdd) \
  V(SVEFPComplexMulAddIndex) \
  V(SVEFPFastReduction) \
  V(SVEFPMulIndex) \
  V(SVEFPMulAdd) \
  V(SVEFPMulAddIndex) \
  V(SVEFPUnaryOpUnpredicated) \
  V(SVEIncDecByPredicateCount) \
  V(SVEIndexGeneration) \
  V(SVEIntArithmeticUnpredicated) \
  V(SVEIntCompareSignedImm) \
  V(SVEIntCompareUnsignedImm) \
  V(SVEIntCompareVectors) \
  V(SVEIntMulAddPredicated) \
  V(SVEIntMulAddUnpredicated) \
  V(SVEIntReduction) \
  V(SVEIntUnaryArithmeticPredicated) \
  V(SVEMovprfx) \
  V(SVEMulIndex) \
  V(SVEPermuteVectorExtract) \
  V(SVEPermuteVectorInterleaving) \
  V(SVEPredicateCount) \
  V(SVEPredicateLogical) \
  V(SVEPropagateBreak) \
  V(SVEStackFrameAdjustment) \
  V(SVEStackFrameSize) \
  V(SVEVectorSelect) \
  V(SVEBitwiseLogical_Predicated) \
  V(SVEBitwiseLogicalWithImm_Unpredicated) \
  V(SVEBitwiseShiftByImm_Predicated) \
  V(SVEBitwiseShiftByVector_Predicated) \
  V(SVEBitwiseShiftByWideElements_Predicated) \
  V(SVEBroadcastBitmaskImm) \
  V(SVEBroadcastFPImm_Unpredicated) \
  V(SVEBroadcastGeneralRegister) \
  V(SVEBroadcastIndexElement) \
  V(SVEBroadcastIntImm_Unpredicated) \
  V(SVECompressActiveElements) \
  V(SVEConditionallyBroadcastElementToVector) \
  V(SVEConditionallyExtractElementToSIMDFPScalar) \
  V(SVEConditionallyExtractElementToGeneralRegister) \
  V(SVEConditionallyTerminateScalars) \
  V(SVEConstructivePrefix_Unpredicated) \
  V(SVEContiguousFirstFaultLoad_ScalarPlusScalar) \
  V(SVEContiguousLoad_ScalarPlusImm) \
  V(SVEContiguousLoad_ScalarPlusScalar) \
  V(SVEContiguousNonFaultLoad_ScalarPlusImm) \
  V(SVEContiguousNonTemporalLoad_ScalarPlusImm) \
  V(SVEContiguousNonTemporalLoad_ScalarPlusScalar) \
  V(SVEContiguousNonTemporalStore_ScalarPlusImm) \
  V(SVEContiguousNonTemporalStore_ScalarPlusScalar) \
  V(SVEContiguousPrefetch_ScalarPlusImm) \
  V(SVEContiguousPrefetch_ScalarPlusScalar) \
  V(SVEContiguousStore_ScalarPlusImm) \
  V(SVEContiguousStore_ScalarPlusScalar) \
  V(SVECopySIMDFPScalarRegisterToVector_Predicated) \
  V(SVECopyFPImm_Predicated) \
  V(SVECopyGeneralRegisterToVector_Predicated) \
  V(SVECopyIntImm_Predicated) \
  V(SVEElementCount) \
  V(SVEExtractElementToSIMDFPScalarRegister) \
  V(SVEExtractElementToGeneralRegister) \
  V(SVEFPArithmetic_Predicated) \
  V(SVEFPArithmeticWithImm_Predicated) \
  V(SVEFPConvertPrecision) \
  V(SVEFPConvertToInt) \
  V(SVEFPExponentialAccelerator) \
  V(SVEFPRoundToIntegralValue) \
  V(SVEFPTrigMulAddCoefficient) \
  V(SVEFPTrigSelectCoefficient) \
  V(SVEFPUnaryOp) \
  V(SVEIncDecRegisterByElementCount) \
  V(SVEIncDecVectorByElementCount) \
  V(SVEInsertSIMDFPScalarRegister) \
  V(SVEInsertGeneralRegister) \
  V(SVEIntAddSubtractImm_Unpredicated) \
  V(SVEIntAddSubtractVectors_Predicated) \
  V(SVEIntCompareScalarCountAndLimit) \
  V(SVEIntConvertToFP) \
  V(SVEIntDivideVectors_Predicated) \
  V(SVEIntMinMaxImm_Unpredicated) \
  V(SVEIntMinMaxDifference_Predicated) \
  V(SVEIntMulImm_Unpredicated) \
  V(SVEIntMulVectors_Predicated) \
  V(SVELoadAndBroadcastElement) \
  V(SVELoadAndBroadcastQuadword_ScalarPlusImm) \
  V(SVELoadAndBroadcastQuadword_ScalarPlusScalar) \
  V(SVELoadMultipleStructures_ScalarPlusImm) \
  V(SVELoadMultipleStructures_ScalarPlusScalar) \
  V(SVELoadPredicateRegister) \
  V(SVELoadVectorRegister) \
  V(SVEPartitionBreakCondition) \
  V(SVEPermutePredicateElements) \
  V(SVEPredicateFirstActive) \
  V(SVEPredicateInitialize) \
  V(SVEPredicateNextActive) \
  V(SVEPredicateReadFromFFR_Predicated) \
  V(SVEPredicateReadFromFFR_Unpredicated) \
  V(SVEPredicateTest) \
  V(SVEPredicateZero) \
  V(SVEPropagateBreakToNextPartition) \
  V(SVEReversePredicateElements) \
  V(SVEReverseVectorElements) \
  V(SVEReverseWithinElements) \
  V(SVESaturatingIncDecRegisterByElementCount) \
  V(SVESaturatingIncDecVectorByElementCount) \
  V(SVEStoreMultipleStructures_ScalarPlusImm) \
  V(SVEStoreMultipleStructures_ScalarPlusScalar) \
  V(SVEStorePredicateRegister) \
  V(SVEStoreVectorRegister) \
  V(SVETableLookup) \
  V(SVEUnpackPredicateElements) \
  V(SVEUnpackVectorElements) \
  V(SVEVectorSplice_Destructive) \
  V(System) \
  V(TestBranch) \
  V(Unallocated) \
  V(UnconditionalBranch) \
  V(UnconditionalBranchToRegister) \
  V(Unimplemented)

#define VISITOR_LIST_THAT_DONT_RETURN(V) V(Reserved)

#define VISITOR_LIST(V)       \
  VISITOR_LIST_THAT_RETURN(V) \
  VISITOR_LIST_THAT_DONT_RETURN(V)

namespace vixl {
namespace aarch64 {

// The Visitor interface. Disassembler and simulator (and other tools)
// must provide implementations for all of these functions.
//
// Note that this class must change in breaking ways with even minor additions
// to VIXL, and so its API should be considered unstable. User classes that
// inherit from this one should be expected to break even on minor version
// updates. If this is a problem, consider using DecoderVisitorWithDefaults
// instead.
class DecoderVisitor {
 public:
  enum VisitorConstness { kConstVisitor, kNonConstVisitor };
  explicit DecoderVisitor(VisitorConstness constness = kConstVisitor)
      : constness_(constness) {}

  virtual ~DecoderVisitor() {}

#define DECLARE(A) virtual void Visit##A(const Instruction* instr) = 0;
  VISITOR_LIST(DECLARE)
#undef DECLARE

  bool IsConstVisitor() const { return constness_ == kConstVisitor; }
  Instruction* MutableInstruction(const Instruction* instr) {
    VIXL_ASSERT(!IsConstVisitor());
    return const_cast<Instruction*>(instr);
  }

 private:
  const VisitorConstness constness_;
};

// As above, but a default (no-op) implementation for each visitor is provided.
// This is useful for derived classes that only care about specific visitors.
//
// A minor version update may add a visitor, but will never remove one, so it is
// safe (and recommended) to use `override` in derived classes.
class DecoderVisitorWithDefaults : public DecoderVisitor {
 public:
  explicit DecoderVisitorWithDefaults(
      VisitorConstness constness = kConstVisitor)
      : DecoderVisitor(constness) {}

  virtual ~DecoderVisitorWithDefaults() {}

#define DECLARE(A) \
  virtual void Visit##A(const Instruction* instr) VIXL_OVERRIDE { USE(instr); }
  VISITOR_LIST(DECLARE)
#undef DECLARE
};
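// A minimal sketch of a user-defined visitor (an illustration, not part of
// the original source): counting branch instructions by overriding only the
// visitors of interest and inheriting no-op defaults for everything else.
//
//   class BranchCounter : public DecoderVisitorWithDefaults {
//    public:
//     void VisitUnconditionalBranch(const Instruction* instr) override {
//       USE(instr);
//       count_++;
//     }
//     void VisitConditionalBranch(const Instruction* instr) override {
//       USE(instr);
//       count_++;
//     }
//     int count_ = 0;
//   };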

class DecodeNode;
class CompiledDecodeNode;

// The instruction decoder is constructed from a graph of decode nodes. At each
// node, a number of bits are sampled from the instruction being decoded. The
// resulting value is used to look up the next node in the graph, which then
// samples other bits, and moves to other decode nodes. Eventually, a visitor
// node is reached, and the corresponding visitor function is called, which
// handles the instruction.
class Decoder {
 public:
  Decoder() { ConstructDecodeGraph(); }

  // Top-level wrappers around the actual decoding function.
  void Decode(const Instruction* instr);
  void Decode(Instruction* instr);

  // Decode all instructions from start (inclusive) to end (exclusive).
  template <typename T>
  void Decode(T start, T end) {
    for (T instr = start; instr < end; instr = instr->GetNextInstruction()) {
      Decode(instr);
    }
  }

  // Register a new visitor class with the decoder.
  // Decode() will call the corresponding visitor method from all registered
  // visitor classes when decoding reaches the leaf node of the instruction
  // decode tree.
  // Visitors are called in order.
  // A visitor can be registered multiple times.
  //
  //   d.AppendVisitor(V1);
  //   d.AppendVisitor(V2);
  //   d.PrependVisitor(V2);
  //   d.AppendVisitor(V3);
  //
  //   d.Decode(i);
  //
  // will call in order visitor methods in V2, V1, V2, V3.
  void AppendVisitor(DecoderVisitor* visitor);
  void PrependVisitor(DecoderVisitor* visitor);
  // These helpers register `new_visitor` before or after the first instance of
  // `registered_visitor` in the list.
  // So if
  //   V1, V2, V1, V2
  // are registered in this order in the decoder, calls to
  //   d.InsertVisitorAfter(V3, V1);
  //   d.InsertVisitorBefore(V4, V2);
  // will yield the order
  //   V1, V3, V4, V2, V1, V2
  //
  // For more complex modifications of the order of registered visitors, one
  // can directly access and modify the list of visitors via the `visitors()`
  // accessor.
void InsertVisitorBefore(DecoderVisitor* new_visitor,
|
||||
DecoderVisitor* registered_visitor);
|
||||
void InsertVisitorAfter(DecoderVisitor* new_visitor,
|
||||
DecoderVisitor* registered_visitor);
|
||||
|
||||
// Remove all instances of a previously registered visitor class from the list
|
||||
// of visitors stored by the decoder.
|
||||
void RemoveVisitor(DecoderVisitor* visitor);
|
||||
|
||||
#define DECLARE(A) void Visit##A(const Instruction* instr);
|
||||
VISITOR_LIST(DECLARE)
|
||||
#undef DECLARE
|
||||
|
||||
std::list<DecoderVisitor*>* visitors() { return &visitors_; }
|
||||
|
||||
// Get a DecodeNode by name from the Decoder's map.
|
||||
DecodeNode* GetDecodeNode(std::string name);
|
||||
|
||||
private:
|
||||
// Decodes an instruction and calls the visitor functions registered with the
|
||||
// Decoder class.
|
||||
void DecodeInstruction(const Instruction* instr);
|
||||
|
||||
// Add an initialised DecodeNode to the decode_node_ map.
|
||||
void AddDecodeNode(const DecodeNode& node);
|
||||
|
||||
// Visitors are registered in a list.
|
||||
std::list<DecoderVisitor*> visitors_;
|
||||
|
||||
// Compile the dynamically generated decode graph based on the static
|
||||
// information in kDecodeMapping and kVisitorNodes.
|
||||
void ConstructDecodeGraph();
|
||||
|
||||
// Root node for the compiled decoder graph, stored here to avoid a map lookup
|
||||
// for every instruction decoded.
|
||||
CompiledDecodeNode* compiled_decoder_root_;
|
||||
|
||||
// Map of node names to DecodeNodes.
|
||||
std::map<std::string, DecodeNode> decode_nodes_;
|
||||
};
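
// A typical decode loop, sketched (BranchCounter is the hypothetical visitor
// from the example above; start and end are Instruction* buffer bounds):
//
//   Decoder decoder;
//   BranchCounter counter;
//   decoder.AppendVisitor(&counter);
//   decoder.Decode(start, end);  // Calls counter's visitors per instruction.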

const int kMaxDecodeSampledBits = 16;
const int kMaxDecodeMappings = 100;
typedef void (Decoder::*DecodeFnPtr)(const Instruction*);
typedef uint32_t (Instruction::*BitExtractFn)(void) const;

// A Visitor node maps the name of a visitor to the function that handles it.
struct VisitorNode {
  const char* name;
  const DecodeFnPtr visitor_fn;
};

// DecodePattern and DecodeMapping represent the input data to the decoder
// compilation stage. After compilation, the decoder is embodied in the graph
// of CompiledDecodeNodes pointed to by compiled_decoder_root_.

// A DecodePattern maps a pattern of set/unset/don't care (1, 0, x) bits as a
// string to the name of its handler.
struct DecodePattern {
  const char* pattern;
  const char* handler;
};

// A DecodeMapping consists of the name of a handler, the bits sampled in the
// instruction by that handler, and a mapping from the pattern that those
// sampled bits match to the corresponding name of a node.
struct DecodeMapping {
  const char* name;
  const uint8_t sampled_bits[kMaxDecodeSampledBits];
  const DecodePattern mapping[kMaxDecodeMappings];
};

// For speed, before nodes can be used for decoding instructions, they must
// be compiled. This converts the mapping "bit pattern strings to decoder name
// string" stored in DecodeNodes to an array lookup for the pointer to the next
// node, stored in CompiledDecodeNodes. Compilation may also apply other
// optimisations for simple decode patterns.
class CompiledDecodeNode {
 public:
  // Constructor for decode node, containing a decode table and pointer to a
  // function that extracts the bits to be sampled.
  CompiledDecodeNode(BitExtractFn bit_extract_fn, size_t decode_table_size)
      : bit_extract_fn_(bit_extract_fn),
        visitor_fn_(NULL),
        decode_table_size_(decode_table_size),
        decoder_(NULL) {
    decode_table_ = new CompiledDecodeNode*[decode_table_size_];
    memset(decode_table_, 0, decode_table_size_ * sizeof(decode_table_[0]));
  }

  // Constructor for wrappers around visitor functions. These require no
  // decoding, so no bit extraction function or decode table is assigned.
  explicit CompiledDecodeNode(DecodeFnPtr visitor_fn, Decoder* decoder)
      : bit_extract_fn_(NULL),
        visitor_fn_(visitor_fn),
        decode_table_(NULL),
        decode_table_size_(0),
        decoder_(decoder) {}

  ~CompiledDecodeNode() VIXL_NEGATIVE_TESTING_ALLOW_EXCEPTION {
    // Free the decode table, if this is a compiled, non-leaf node.
    if (decode_table_ != NULL) {
      VIXL_ASSERT(!IsLeafNode());
      delete[] decode_table_;
    }
  }

  // Decode the instruction by either sampling the bits using the bit extract
  // function to find the next node, or, if we're at a leaf, calling the visitor
  // function.
  void Decode(const Instruction* instr) const;

  // A leaf node is a wrapper for a visitor function.
  bool IsLeafNode() const {
    VIXL_ASSERT(((visitor_fn_ == NULL) && (bit_extract_fn_ != NULL)) ||
                ((visitor_fn_ != NULL) && (bit_extract_fn_ == NULL)));
    return visitor_fn_ != NULL;
  }

  // Get a pointer to the next node required in the decode process, based on the
  // bits sampled by the current node.
  CompiledDecodeNode* GetNodeForBits(uint32_t bits) const {
    VIXL_ASSERT(bits < decode_table_size_);
    return decode_table_[bits];
  }

  // Set the next node in the decode process for the pattern of sampled bits in
  // the current node.
  void SetNodeForBits(uint32_t bits, CompiledDecodeNode* n) {
    VIXL_ASSERT(bits < decode_table_size_);
    VIXL_ASSERT(n != NULL);
    decode_table_[bits] = n;
  }

 private:
  // Pointer to an instantiated template function for extracting the bits
  // sampled by this node. Set to NULL for leaf nodes.
  const BitExtractFn bit_extract_fn_;

  // Visitor function that handles the instruction identified. Set only for
  // leaf nodes, where no extra decoding is required, otherwise NULL.
  const DecodeFnPtr visitor_fn_;

  // Mapping table from instruction bits to next decode stage.
  CompiledDecodeNode** decode_table_;
  const size_t decode_table_size_;

  // Pointer to the decoder containing this node, used to call its visitor
  // function for leaf nodes. Set to NULL for non-leaf nodes.
  Decoder* decoder_;
};

class DecodeNode {
 public:
  // Default constructor needed for map initialisation.
  DecodeNode() : compiled_node_(NULL) {}

  // Constructor for DecodeNode wrappers around visitor functions. These are
  // marked as "compiled", as there is no decoding left to do.
  explicit DecodeNode(const VisitorNode& visitor, Decoder* decoder)
      : name_(visitor.name),
        visitor_fn_(visitor.visitor_fn),
        decoder_(decoder),
        compiled_node_(NULL) {}

  // Constructor for DecodeNodes that map bit patterns to other DecodeNodes.
  explicit DecodeNode(const DecodeMapping& map, Decoder* decoder = NULL)
      : name_(map.name),
        visitor_fn_(NULL),
        decoder_(decoder),
        compiled_node_(NULL) {
    // The length of the bit string in the first mapping determines the number
    // of sampled bits. When adding patterns later, we assert that all mappings
    // sample the same number of bits.
    VIXL_CHECK(strcmp(map.mapping[0].pattern, "otherwise") != 0);
    int bit_count = static_cast<int>(strlen(map.mapping[0].pattern));
    VIXL_CHECK((bit_count > 0) && (bit_count <= 32));
    SetSampledBits(map.sampled_bits, bit_count);
    AddPatterns(map.mapping);
  }

  ~DecodeNode() {
    // Delete the compiled version of this node, if one was created.
    if (compiled_node_ != NULL) {
      delete compiled_node_;
    }
  }

  // Set the bits sampled from the instruction by this node.
  void SetSampledBits(const uint8_t* bits, int bit_count);

  // Get the bits sampled from the instruction by this node.
  std::vector<uint8_t> GetSampledBits() const;

  // Get the number of bits sampled from the instruction by this node.
  size_t GetSampledBitsCount() const;

  // Add patterns to this node's internal pattern table.
  void AddPatterns(const DecodePattern* patterns);

  // A leaf node is a DecodeNode that wraps the visitor function for the
  // identified instruction class.
  bool IsLeafNode() const { return visitor_fn_ != NULL; }

  std::string GetName() const { return name_; }

  // Create a CompiledDecodeNode of specified table size that uses
  // bit_extract_fn to sample bits from the instruction.
  void CreateCompiledNode(BitExtractFn bit_extract_fn, size_t table_size) {
    VIXL_ASSERT(bit_extract_fn != NULL);
    VIXL_ASSERT(table_size > 0);
    compiled_node_ = new CompiledDecodeNode(bit_extract_fn, table_size);
  }

  // Create a CompiledDecodeNode wrapping a visitor function. No decoding is
  // required for this node; the visitor function is called instead.
  void CreateVisitorNode() {
    compiled_node_ = new CompiledDecodeNode(visitor_fn_, decoder_);
  }

  // Find and compile the DecodeNode named "name", and set it as the node for
  // the pattern "bits".
  void CompileNodeForBits(Decoder* decoder, std::string name, uint32_t bits);

  // Get a pointer to an instruction method that extracts the instruction bits
  // specified by the mask argument, and returns those sampled bits as a
  // contiguous sequence, suitable for indexing an array.
  // For example, a mask of 0b1010 returns a function that, given an instruction
  // 0bXYZW, will return 0bXZ.
  BitExtractFn GetBitExtractFunction(uint32_t mask);

  // Get a pointer to an Instruction method that applies a mask to the
  // instruction bits, and tests if the result is equal to value. The returned
  // function gives a 1 result if (inst & mask == value), 0 otherwise.
  BitExtractFn GetBitExtractFunction(uint32_t mask, uint32_t value);

  // Compile this DecodeNode into a new CompiledDecodeNode and return a pointer
  // to it. This pointer is also stored inside the DecodeNode itself. Destroying
  // a DecodeNode frees its associated CompiledDecodeNode.
  CompiledDecodeNode* Compile(Decoder* decoder);

  // Get a pointer to the CompiledDecodeNode associated with this DecodeNode.
  // Returns NULL if the node has not been compiled yet.
  CompiledDecodeNode* GetCompiledNode() const { return compiled_node_; }
  bool IsCompiled() const { return GetCompiledNode() != NULL; }

 private:
  // Generate a mask and value pair from a string constructed from 0, 1 and x
  // (don't care) characters.
  // For example "10x1" should return mask = 0b1101, value = 0b1001.
  typedef std::pair<Instr, Instr> MaskValuePair;
  MaskValuePair GenerateMaskValuePair(std::string pattern) const;

  // Generate a pattern string ordered by the bit positions sampled by this
  // node. The first character in the string corresponds to the lowest sampled
  // bit.
  // For example, a pattern of "1x0" expected when sampling bits 31, 1 and 30
  // returns the pattern "x01"; bit 1 should be 'x', bit 30 '0' and bit 31 '1'.
  // This output makes comparisons easier between the pattern and bits sampled
  // from an instruction using the fast "compress" algorithm. See
  // Instruction::Compress().
  std::string GenerateOrderedPattern(std::string pattern) const;

  // Generate a mask with a bit set at each sample position.
  uint32_t GenerateSampledBitsMask() const;

  // Try to compile a more optimised decode operation for this node, returning
  // true if successful.
  bool TryCompileOptimisedDecodeTable(Decoder* decoder);

  // Name of this decoder node, used to construct edges in the decode graph.
  std::string name_;

  // Vector of bits sampled from an instruction to determine which node to look
  // up next in the decode process.
  std::vector<uint8_t> sampled_bits_;

  // Visitor function that handles the instruction identified. Set only for leaf
  // nodes, where no extra decoding is required. For non-leaf decoding nodes,
  // this pointer is NULL.
  DecodeFnPtr visitor_fn_;

  // Source mapping from bit pattern to name of next decode stage.
  std::vector<DecodePattern> pattern_table_;

  // Pointer to the decoder containing this node, used to call its visitor
  // function for leaf nodes.
  Decoder* decoder_;

  // Pointer to the compiled version of this node. If this node hasn't been
  // compiled yet, this pointer is NULL.
  CompiledDecodeNode* compiled_node_;
};
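
// To make the GetBitExtractFunction(mask) contract above concrete, here is an
// equivalent free-standing sketch (illustrative only; the real implementation
// returns instantiated Instruction template methods):
//
//   uint32_t ExtractBits(uint32_t instr, uint32_t mask) {
//     uint32_t result = 0;
//     int result_bit = 0;
//     for (int i = 0; i < 32; i++) {
//       if ((mask >> i) & 1) {
//         result |= ((instr >> i) & 1) << result_bit++;
//       }
//     }
//     return result;
//   }
//
// For example, ExtractBits(0b0110, 0b1010) samples bits 1 and 3 (values 1 and
// 0) and packs them into 0b01, ready to index a decode table of size 4.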

}  // namespace aarch64
}  // namespace vixl

#endif  // VIXL_AARCH64_DECODER_AARCH64_H_
2138
externals/vixl/vixl/src/aarch64/decoder-constants-aarch64.h
vendored
Normal file
File diff suppressed because it is too large
10996
externals/vixl/vixl/src/aarch64/disasm-aarch64.cc
vendored
Normal file
File diff suppressed because it is too large
242
externals/vixl/vixl/src/aarch64/disasm-aarch64.h
vendored
Normal file
@@ -0,0 +1,242 @@
// Copyright 2015, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_AARCH64_DISASM_AARCH64_H
#define VIXL_AARCH64_DISASM_AARCH64_H

#include <utility>

#include "../globals-vixl.h"
#include "../utils-vixl.h"

#include "cpu-features-auditor-aarch64.h"
#include "decoder-aarch64.h"
#include "instructions-aarch64.h"
#include "operands-aarch64.h"

namespace vixl {
namespace aarch64 {

class Disassembler : public DecoderVisitor {
 public:
  Disassembler();
  Disassembler(char* text_buffer, int buffer_size);
  virtual ~Disassembler();
  char* GetOutput();

  // Declare all Visitor functions.
#define DECLARE(A) \
  virtual void Visit##A(const Instruction* instr) VIXL_OVERRIDE;
  VISITOR_LIST(DECLARE)
#undef DECLARE

 protected:
  virtual void ProcessOutput(const Instruction* instr);

  // Default output functions. The functions below implement a default way of
  // printing elements in the disassembly. A sub-class can override these to
  // customize the disassembly output.

  // Prints the name of a register.
  // TODO: This currently doesn't allow renaming of V registers.
  virtual void AppendRegisterNameToOutput(const Instruction* instr,
                                          const CPURegister& reg);

  // Prints a PC-relative offset. This is used for example when disassembling
  // branches to immediate offsets.
  virtual void AppendPCRelativeOffsetToOutput(const Instruction* instr,
                                              int64_t offset);

  // Prints an address, in the general case. It can be code or data. This is
  // used for example to print the target address of an ADR instruction.
  virtual void AppendCodeRelativeAddressToOutput(const Instruction* instr,
                                                 const void* addr);

  // Prints the address of some code.
  // This is used for example to print the target address of a branch to an
  // immediate offset.
  // A sub-class can for example override this method to lookup the address and
  // print an appropriate name.
  virtual void AppendCodeRelativeCodeAddressToOutput(const Instruction* instr,
                                                     const void* addr);

  // Prints the address of some data.
  // This is used for example to print the source address of a load literal
  // instruction.
  virtual void AppendCodeRelativeDataAddressToOutput(const Instruction* instr,
                                                     const void* addr);

  // Same as the above, but for addresses that are not relative to the code
  // buffer. They are currently not used by VIXL.
  virtual void AppendAddressToOutput(const Instruction* instr,
                                     const void* addr);
  virtual void AppendCodeAddressToOutput(const Instruction* instr,
                                         const void* addr);
  virtual void AppendDataAddressToOutput(const Instruction* instr,
                                         const void* addr);

 public:
  // Get/Set the offset that should be added to code addresses when printing
  // code-relative addresses in the AppendCodeRelative<Type>AddressToOutput()
  // helpers.
  // Below is an example of how a branch immediate instruction in memory at
  // address 0xb010200 would disassemble with different offsets.
  // Base address | Disassembly
  //   0x0        | 0xb010200:  b #+0xcc (addr 0xb0102cc)
  //   0x10000    | 0xb000200:  b #+0xcc (addr 0xb0002cc)
  //   0xb010200  | 0x0:        b #+0xcc (addr 0xcc)
  void MapCodeAddress(int64_t base_address, const Instruction* instr_address);
  int64_t CodeRelativeAddress(const void* instr);

 private:
  void Format(const Instruction* instr,
              const char* mnemonic,
              const char* format0,
              const char* format1 = NULL);
  void Substitute(const Instruction* instr, const char* string);
  int SubstituteField(const Instruction* instr, const char* format);
  int SubstituteRegisterField(const Instruction* instr, const char* format);
  int SubstitutePredicateRegisterField(const Instruction* instr,
                                       const char* format);
  int SubstituteImmediateField(const Instruction* instr, const char* format);
  int SubstituteLiteralField(const Instruction* instr, const char* format);
  int SubstituteBitfieldImmediateField(const Instruction* instr,
                                       const char* format);
  int SubstituteShiftField(const Instruction* instr, const char* format);
  int SubstituteExtendField(const Instruction* instr, const char* format);
  int SubstituteConditionField(const Instruction* instr, const char* format);
  int SubstitutePCRelAddressField(const Instruction* instr, const char* format);
  int SubstituteBranchTargetField(const Instruction* instr, const char* format);
  int SubstituteLSRegOffsetField(const Instruction* instr, const char* format);
  int SubstitutePrefetchField(const Instruction* instr, const char* format);
  int SubstituteBarrierField(const Instruction* instr, const char* format);
  int SubstituteSysOpField(const Instruction* instr, const char* format);
  int SubstituteCrField(const Instruction* instr, const char* format);
  int SubstituteIntField(const Instruction* instr, const char* format);
  int SubstituteSVESize(const Instruction* instr, const char* format);
  int SubstituteTernary(const Instruction* instr, const char* format);

  std::pair<unsigned, unsigned> GetRegNumForField(const Instruction* instr,
                                                  char reg_prefix,
                                                  const char* field);

  bool RdIsZROrSP(const Instruction* instr) const {
    return (instr->GetRd() == kZeroRegCode);
  }

  bool RnIsZROrSP(const Instruction* instr) const {
    return (instr->GetRn() == kZeroRegCode);
  }

  bool RmIsZROrSP(const Instruction* instr) const {
    return (instr->GetRm() == kZeroRegCode);
  }

  bool RaIsZROrSP(const Instruction* instr) const {
    return (instr->GetRa() == kZeroRegCode);
  }

  bool IsMovzMovnImm(unsigned reg_size, uint64_t value);

  int64_t code_address_offset() const { return code_address_offset_; }

 protected:
  void ResetOutput();
  void AppendToOutput(const char* string, ...) PRINTF_CHECK(2, 3);

  void set_code_address_offset(int64_t code_address_offset) {
    code_address_offset_ = code_address_offset;
  }

  char* buffer_;
  uint32_t buffer_pos_;
  uint32_t buffer_size_;
  bool own_buffer_;

  int64_t code_address_offset_;
};


class PrintDisassembler : public Disassembler {
 public:
  explicit PrintDisassembler(FILE* stream)
      : cpu_features_auditor_(NULL),
        cpu_features_prefix_("// Needs: "),
        cpu_features_suffix_(""),
        signed_addresses_(false),
        stream_(stream) {}

  // Convenience helpers for quick disassembly, without having to manually
  // create a decoder.
  void DisassembleBuffer(const Instruction* start, uint64_t size);
  void DisassembleBuffer(const Instruction* start, const Instruction* end);
  void Disassemble(const Instruction* instr);

  // If a CPUFeaturesAuditor is specified, it will be used to annotate
  // disassembly. The CPUFeaturesAuditor is expected to visit the instructions
  // _before_ the disassembler, such that the CPUFeatures information is
  // available when the disassembler is called.
  void RegisterCPUFeaturesAuditor(CPUFeaturesAuditor* auditor) {
    cpu_features_auditor_ = auditor;
  }

  // Set the prefix to appear before the CPU features annotations.
  void SetCPUFeaturesPrefix(const char* prefix) {
    VIXL_ASSERT(prefix != NULL);
    cpu_features_prefix_ = prefix;
  }

  // Set the suffix to appear after the CPU features annotations.
  void SetCPUFeaturesSuffix(const char* suffix) {
    VIXL_ASSERT(suffix != NULL);
    cpu_features_suffix_ = suffix;
  }

  // By default, addresses are printed as simple, unsigned 64-bit hex values.
  //
  // With `PrintSignedAddresses(true)`:
  //  - negative addresses are printed as "-0x1234...",
  //  - positive addresses have a leading space, like " 0x1234...", to maintain
  //    alignment.
  //
  // This is most useful in combination with Disassembler::MapCodeAddress(...).
  void PrintSignedAddresses(bool s) { signed_addresses_ = s; }

 protected:
  virtual void ProcessOutput(const Instruction* instr) VIXL_OVERRIDE;

  CPUFeaturesAuditor* cpu_features_auditor_;
  const char* cpu_features_prefix_;
  const char* cpu_features_suffix_;
  bool signed_addresses_;

 private:
  FILE* stream_;
};
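
// A quick-start sketch ("code" and "size" are hypothetical stand-ins for a
// real instruction buffer and its length in bytes):
//
//   PrintDisassembler disasm(stdout);
//   disasm.MapCodeAddress(0, code);        // Print buffer-relative addresses.
//   disasm.DisassembleBuffer(code, size);  // code is a const Instruction*.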
}  // namespace aarch64
}  // namespace vixl

#endif  // VIXL_AARCH64_DISASM_AARCH64_H
1416
externals/vixl/vixl/src/aarch64/instructions-aarch64.cc
vendored
Normal file
File diff suppressed because it is too large
1015
externals/vixl/vixl/src/aarch64/instructions-aarch64.h
vendored
Normal file
File diff suppressed because it is too large
7419
externals/vixl/vixl/src/aarch64/logic-aarch64.cc
vendored
Normal file
File diff suppressed because it is too large
3089
externals/vixl/vixl/src/aarch64/macro-assembler-aarch64.cc
vendored
Normal file
File diff suppressed because it is too large
7347
externals/vixl/vixl/src/aarch64/macro-assembler-aarch64.h
vendored
Normal file
File diff suppressed because it is too large
2027
externals/vixl/vixl/src/aarch64/macro-assembler-sve-aarch64.cc
vendored
Normal file
File diff suppressed because it is too large
464
externals/vixl/vixl/src/aarch64/operands-aarch64.cc
vendored
Normal file
@@ -0,0 +1,464 @@
// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "operands-aarch64.h"

namespace vixl {
namespace aarch64 {

// CPURegList utilities.
CPURegister CPURegList::PopLowestIndex(RegList mask) {
  RegList list = list_ & mask;
  if (list == 0) return NoCPUReg;
  int index = CountTrailingZeros(list);
  VIXL_ASSERT(((1 << index) & list) != 0);
  Remove(index);
  return CPURegister(index, size_, type_);
}


CPURegister CPURegList::PopHighestIndex(RegList mask) {
  RegList list = list_ & mask;
  if (list == 0) return NoCPUReg;
  int index = CountLeadingZeros(list);
  index = kRegListSizeInBits - 1 - index;
  VIXL_ASSERT(((1 << index) & list) != 0);
  Remove(index);
  return CPURegister(index, size_, type_);
}
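
// For example (an illustrative sketch, not part of the original file):
//
//   CPURegList list(x0, x1, x2);
//   CPURegister lo = list.PopLowestIndex();   // x0; list is now {x1, x2}.
//   CPURegister hi = list.PopHighestIndex();  // x2; list is now {x1}.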


bool CPURegList::IsValid() const {
  if (type_ == CPURegister::kNoRegister) {
    // We can't use IsEmpty here because that asserts IsValid().
    return list_ == 0;
  } else {
    bool is_valid = true;
    // Try to create a CPURegister for each element in the list.
    for (int i = 0; i < kRegListSizeInBits; i++) {
      if (((list_ >> i) & 1) != 0) {
        is_valid &= CPURegister(i, size_, type_).IsValid();
      }
    }
    return is_valid;
  }
}


void CPURegList::RemoveCalleeSaved() {
  if (GetType() == CPURegister::kRegister) {
    Remove(GetCalleeSaved(GetRegisterSizeInBits()));
  } else if (GetType() == CPURegister::kVRegister) {
    Remove(GetCalleeSavedV(GetRegisterSizeInBits()));
  } else {
    VIXL_ASSERT(GetType() == CPURegister::kNoRegister);
    VIXL_ASSERT(IsEmpty());
    // The list must already be empty, so do nothing.
  }
}


CPURegList CPURegList::Union(const CPURegList& list_1,
                             const CPURegList& list_2,
                             const CPURegList& list_3) {
  return Union(list_1, Union(list_2, list_3));
}


CPURegList CPURegList::Union(const CPURegList& list_1,
                             const CPURegList& list_2,
                             const CPURegList& list_3,
                             const CPURegList& list_4) {
  return Union(Union(list_1, list_2), Union(list_3, list_4));
}


CPURegList CPURegList::Intersection(const CPURegList& list_1,
                                    const CPURegList& list_2,
                                    const CPURegList& list_3) {
  return Intersection(list_1, Intersection(list_2, list_3));
}


CPURegList CPURegList::Intersection(const CPURegList& list_1,
                                    const CPURegList& list_2,
                                    const CPURegList& list_3,
                                    const CPURegList& list_4) {
  return Intersection(Intersection(list_1, list_2),
                      Intersection(list_3, list_4));
}


CPURegList CPURegList::GetCalleeSaved(unsigned size) {
  return CPURegList(CPURegister::kRegister, size, 19, 29);
}


CPURegList CPURegList::GetCalleeSavedV(unsigned size) {
  return CPURegList(CPURegister::kVRegister, size, 8, 15);
}


CPURegList CPURegList::GetCallerSaved(unsigned size) {
  // Registers x0-x18 and lr (x30) are caller-saved.
  CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18);
  // Do not use lr directly to avoid initialisation order fiasco bugs for users.
  list.Combine(Register(30, kXRegSize));
  return list;
}


CPURegList CPURegList::GetCallerSavedV(unsigned size) {
  // Registers d0-d7 and d16-d31 are caller-saved.
  CPURegList list = CPURegList(CPURegister::kVRegister, size, 0, 7);
  list.Combine(CPURegList(CPURegister::kVRegister, size, 16, 31));
  return list;
}


const CPURegList kCalleeSaved = CPURegList::GetCalleeSaved();
const CPURegList kCalleeSavedV = CPURegList::GetCalleeSavedV();
const CPURegList kCallerSaved = CPURegList::GetCallerSaved();
const CPURegList kCallerSavedV = CPURegList::GetCallerSavedV();

// Operand.
Operand::Operand(int64_t immediate)
    : immediate_(immediate),
      reg_(NoReg),
      shift_(NO_SHIFT),
      extend_(NO_EXTEND),
      shift_amount_(0) {}

Operand::Operand(IntegerOperand immediate)
    : immediate_(immediate.AsIntN(64)),
      reg_(NoReg),
      shift_(NO_SHIFT),
      extend_(NO_EXTEND),
      shift_amount_(0) {}

Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
    : reg_(reg),
      shift_(shift),
      extend_(NO_EXTEND),
      shift_amount_(shift_amount) {
  VIXL_ASSERT(shift != MSL);
  VIXL_ASSERT(reg.Is64Bits() || (shift_amount < kWRegSize));
  VIXL_ASSERT(reg.Is32Bits() || (shift_amount < kXRegSize));
  VIXL_ASSERT(!reg.IsSP());
}


Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
    : reg_(reg),
      shift_(NO_SHIFT),
      extend_(extend),
      shift_amount_(shift_amount) {
  VIXL_ASSERT(reg.IsValid());
  VIXL_ASSERT(shift_amount <= 4);
  VIXL_ASSERT(!reg.IsSP());

  // Extend modes SXTX and UXTX require a 64-bit register.
  VIXL_ASSERT(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
}


bool Operand::IsImmediate() const { return reg_.Is(NoReg); }


bool Operand::IsPlainRegister() const {
  return reg_.IsValid() &&
         (((shift_ == NO_SHIFT) && (extend_ == NO_EXTEND)) ||
          // No-op shifts.
          ((shift_ != NO_SHIFT) && (shift_amount_ == 0)) ||
          // No-op extend operations.
          // We can't include [US]XTW here without knowing more about the
          // context; they are only no-ops for 32-bit operations.
          //
          // For example, this operand could be replaced with w1:
          //   __ Add(w0, w0, Operand(w1, UXTW));
          // However, no plain register can replace it in this context:
          //   __ Add(x0, x0, Operand(w1, UXTW));
          (((extend_ == UXTX) || (extend_ == SXTX)) && (shift_amount_ == 0)));
}


bool Operand::IsShiftedRegister() const {
  return reg_.IsValid() && (shift_ != NO_SHIFT);
}


bool Operand::IsExtendedRegister() const {
  return reg_.IsValid() && (extend_ != NO_EXTEND);
}


bool Operand::IsZero() const {
  if (IsImmediate()) {
    return GetImmediate() == 0;
  } else {
    return GetRegister().IsZero();
  }
}


Operand Operand::ToExtendedRegister() const {
  VIXL_ASSERT(IsShiftedRegister());
  VIXL_ASSERT((shift_ == LSL) && (shift_amount_ <= 4));
  return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
}
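
// For example (illustrative): Operand(x1, LSL, 2).ToExtendedRegister() yields
// Operand(x1, UXTX, 2), and Operand(w1, LSL, 2) yields Operand(w1, UXTW, 2).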


// MemOperand
MemOperand::MemOperand()
    : base_(NoReg),
      regoffset_(NoReg),
      offset_(0),
      addrmode_(Offset),
      shift_(NO_SHIFT),
      extend_(NO_EXTEND) {}


MemOperand::MemOperand(Register base, int64_t offset, AddrMode addrmode)
    : base_(base),
      regoffset_(NoReg),
      offset_(offset),
      addrmode_(addrmode),
      shift_(NO_SHIFT),
      extend_(NO_EXTEND),
      shift_amount_(0) {
  VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
}


MemOperand::MemOperand(Register base,
                       Register regoffset,
                       Extend extend,
                       unsigned shift_amount)
    : base_(base),
      regoffset_(regoffset),
      offset_(0),
      addrmode_(Offset),
      shift_(NO_SHIFT),
      extend_(extend),
      shift_amount_(shift_amount) {
  VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
  VIXL_ASSERT(!regoffset.IsSP());
  VIXL_ASSERT((extend == UXTW) || (extend == SXTW) || (extend == SXTX));

  // SXTX extend mode requires a 64-bit offset register.
  VIXL_ASSERT(regoffset.Is64Bits() || (extend != SXTX));
}


MemOperand::MemOperand(Register base,
                       Register regoffset,
                       Shift shift,
                       unsigned shift_amount)
    : base_(base),
      regoffset_(regoffset),
      offset_(0),
      addrmode_(Offset),
      shift_(shift),
      extend_(NO_EXTEND),
      shift_amount_(shift_amount) {
  VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
  VIXL_ASSERT(regoffset.Is64Bits() && !regoffset.IsSP());
  VIXL_ASSERT(shift == LSL);
}


MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
    : base_(base),
      regoffset_(NoReg),
      addrmode_(addrmode),
      shift_(NO_SHIFT),
      extend_(NO_EXTEND),
      shift_amount_(0) {
  VIXL_ASSERT(base.Is64Bits() && !base.IsZero());

  if (offset.IsImmediate()) {
    offset_ = offset.GetImmediate();
  } else if (offset.IsShiftedRegister()) {
    VIXL_ASSERT((addrmode == Offset) || (addrmode == PostIndex));

    regoffset_ = offset.GetRegister();
    shift_ = offset.GetShift();
    shift_amount_ = offset.GetShiftAmount();

    extend_ = NO_EXTEND;
    offset_ = 0;

    // These assertions match those in the shifted-register constructor.
    VIXL_ASSERT(regoffset_.Is64Bits() && !regoffset_.IsSP());
    VIXL_ASSERT(shift_ == LSL);
  } else {
    VIXL_ASSERT(offset.IsExtendedRegister());
    VIXL_ASSERT(addrmode == Offset);

    regoffset_ = offset.GetRegister();
    extend_ = offset.GetExtend();
    shift_amount_ = offset.GetShiftAmount();

    shift_ = NO_SHIFT;
    offset_ = 0;

    // These assertions match those in the extended-register constructor.
    VIXL_ASSERT(!regoffset_.IsSP());
    VIXL_ASSERT((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));
    VIXL_ASSERT((regoffset_.Is64Bits() || (extend_ != SXTX)));
  }
}


bool MemOperand::IsPlainRegister() const {
  return IsImmediateOffset() && (GetOffset() == 0);
}


bool MemOperand::IsEquivalentToPlainRegister() const {
  if (regoffset_.Is(NoReg)) {
    // Immediate offset, pre-index or post-index.
    return GetOffset() == 0;
  } else if (GetRegisterOffset().IsZero()) {
    // Zero register offset, pre-index or post-index.
    // We can ignore shift and extend options because they all result in zero.
    return true;
  }
  return false;
}


bool MemOperand::IsImmediateOffset() const {
  return (addrmode_ == Offset) && regoffset_.Is(NoReg);
}


bool MemOperand::IsRegisterOffset() const {
  return (addrmode_ == Offset) && !regoffset_.Is(NoReg);
}


bool MemOperand::IsPreIndex() const { return addrmode_ == PreIndex; }


bool MemOperand::IsPostIndex() const { return addrmode_ == PostIndex; }


void MemOperand::AddOffset(int64_t offset) {
  VIXL_ASSERT(IsImmediateOffset());
  offset_ += offset;
}


bool SVEMemOperand::IsValid() const {
#ifdef VIXL_DEBUG
  {
    // It should not be possible for an SVEMemOperand to match multiple types.
    int count = 0;
    if (IsScalarPlusImmediate()) count++;
    if (IsScalarPlusScalar()) count++;
    if (IsScalarPlusVector()) count++;
    if (IsVectorPlusImmediate()) count++;
    if (IsVectorPlusVector()) count++;
    VIXL_ASSERT(count <= 1);
  }
#endif

  // We can't have a register _and_ an immediate offset.
  if ((offset_ != 0) && (!regoffset_.IsNone())) return false;

  if (shift_amount_ != 0) {
    // Only shift and extend modifiers can take a shift amount.
    switch (mod_) {
      case NO_SVE_OFFSET_MODIFIER:
      case SVE_MUL_VL:
        return false;
      case SVE_LSL:
      case SVE_UXTW:
      case SVE_SXTW:
        // Fall through.
        break;
    }
  }

  return IsScalarPlusImmediate() || IsScalarPlusScalar() ||
         IsScalarPlusVector() || IsVectorPlusImmediate() ||
         IsVectorPlusVector();
}


bool SVEMemOperand::IsEquivalentToScalar() const {
  if (IsScalarPlusImmediate()) {
    return GetImmediateOffset() == 0;
  }
  if (IsScalarPlusScalar()) {
    // We can ignore the shift because it will still result in zero.
    return GetScalarOffset().IsZero();
  }
  // Forms involving vectors are never equivalent to a single scalar.
  return false;
}

bool SVEMemOperand::IsPlainRegister() const {
  if (IsScalarPlusImmediate()) {
    return GetImmediateOffset() == 0;
  }
  return false;
}

GenericOperand::GenericOperand(const CPURegister& reg)
    : cpu_register_(reg), mem_op_size_(0) {
  if (reg.IsQ()) {
    VIXL_ASSERT(reg.GetSizeInBits() > static_cast<int>(kXRegSize));
    // Support for Q registers is not implemented yet.
    VIXL_UNIMPLEMENTED();
  }
}


GenericOperand::GenericOperand(const MemOperand& mem_op, size_t mem_op_size)
    : cpu_register_(NoReg), mem_op_(mem_op), mem_op_size_(mem_op_size) {
  if (mem_op_size_ > kXRegSizeInBytes) {
    // We only support generic operands up to the size of X registers.
    VIXL_UNIMPLEMENTED();
  }
}

bool GenericOperand::Equals(const GenericOperand& other) const {
  if (!IsValid() || !other.IsValid()) {
    // Two invalid generic operands are considered equal.
    return !IsValid() && !other.IsValid();
  }
  if (IsCPURegister() && other.IsCPURegister()) {
    return GetCPURegister().Is(other.GetCPURegister());
  } else if (IsMemOperand() && other.IsMemOperand()) {
    return GetMemOperand().Equals(other.GetMemOperand()) &&
           (GetMemOperandSizeInBytes() == other.GetMemOperandSizeInBytes());
  }
  return false;
}
}
}  // namespace vixl::aarch64
978
externals/vixl/vixl/src/aarch64/operands-aarch64.h
vendored
Normal file
@@ -0,0 +1,978 @@
// Copyright 2016, VIXL authors
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_AARCH64_OPERANDS_AARCH64_H_
#define VIXL_AARCH64_OPERANDS_AARCH64_H_

#include <sstream>
#include <string>

#include "instructions-aarch64.h"
#include "registers-aarch64.h"

namespace vixl {
namespace aarch64 {

// Lists of registers.
class CPURegList {
 public:
  explicit CPURegList(CPURegister reg1,
                      CPURegister reg2 = NoCPUReg,
                      CPURegister reg3 = NoCPUReg,
                      CPURegister reg4 = NoCPUReg)
      : list_(reg1.GetBit() | reg2.GetBit() | reg3.GetBit() | reg4.GetBit()),
        size_(reg1.GetSizeInBits()),
        type_(reg1.GetType()) {
    VIXL_ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4));
    VIXL_ASSERT(IsValid());
  }

  CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
      : list_(list), size_(size), type_(type) {
    VIXL_ASSERT(IsValid());
  }

  CPURegList(CPURegister::RegisterType type,
             unsigned size,
             unsigned first_reg,
             unsigned last_reg)
      : size_(size), type_(type) {
    VIXL_ASSERT(
        ((type == CPURegister::kRegister) && (last_reg < kNumberOfRegisters)) ||
        ((type == CPURegister::kVRegister) &&
         (last_reg < kNumberOfVRegisters)));
    VIXL_ASSERT(last_reg >= first_reg);
    list_ = (UINT64_C(1) << (last_reg + 1)) - 1;
    list_ &= ~((UINT64_C(1) << first_reg) - 1);
    VIXL_ASSERT(IsValid());
  }

  // Construct an empty CPURegList with the specified size and type. If `size`
  // is CPURegister::kUnknownSize and the register type requires a size, a valid
  // but unspecified default will be picked.
  static CPURegList Empty(CPURegister::RegisterType type,
                          unsigned size = CPURegister::kUnknownSize) {
    return CPURegList(type, GetDefaultSizeFor(type, size), 0);
  }

  // Construct a CPURegList with all possible registers with the specified size
  // and type. If `size` is CPURegister::kUnknownSize and the register type
  // requires a size, a valid but unspecified default will be picked.
  static CPURegList All(CPURegister::RegisterType type,
                        unsigned size = CPURegister::kUnknownSize) {
    unsigned number_of_registers = (CPURegister::GetMaxCodeFor(type) + 1);
    RegList list = (static_cast<RegList>(1) << number_of_registers) - 1;
    if (type == CPURegister::kRegister) {
      // GetMaxCodeFor(kRegister) ignores SP, so explicitly include it.
      list |= (static_cast<RegList>(1) << kSPRegInternalCode);
    }
    return CPURegList(type, GetDefaultSizeFor(type, size), list);
  }

  CPURegister::RegisterType GetType() const {
    VIXL_ASSERT(IsValid());
    return type_;
  }
  VIXL_DEPRECATED("GetType", CPURegister::RegisterType type() const) {
    return GetType();
  }

  CPURegister::RegisterBank GetBank() const {
    return CPURegister::GetBankFor(GetType());
  }

  // Combine another CPURegList into this one. Registers that already exist in
  // this list are left unchanged. The type and size of the registers in the
  // 'other' list must match those in this list.
  void Combine(const CPURegList& other) {
    VIXL_ASSERT(IsValid());
    VIXL_ASSERT(other.GetType() == type_);
    VIXL_ASSERT(other.GetRegisterSizeInBits() == size_);
    list_ |= other.GetList();
  }

  // Remove every register in the other CPURegList from this one. Registers that
  // do not exist in this list are ignored. The type and size of the registers
  // in the 'other' list must match those in this list.
  void Remove(const CPURegList& other) {
    VIXL_ASSERT(IsValid());
    VIXL_ASSERT(other.GetType() == type_);
    VIXL_ASSERT(other.GetRegisterSizeInBits() == size_);
    list_ &= ~other.GetList();
  }

  // Variants of Combine and Remove which take a single register.
  void Combine(const CPURegister& other) {
    VIXL_ASSERT(other.GetType() == type_);
    VIXL_ASSERT(other.GetSizeInBits() == size_);
    Combine(other.GetCode());
  }

  void Remove(const CPURegister& other) {
    VIXL_ASSERT(other.GetType() == type_);
    VIXL_ASSERT(other.GetSizeInBits() == size_);
    Remove(other.GetCode());
  }

  // Variants of Combine and Remove which take a single register by its code;
  // the type and size of the register is inferred from this list.
  void Combine(int code) {
    VIXL_ASSERT(IsValid());
    VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
    list_ |= (UINT64_C(1) << code);
  }

  void Remove(int code) {
    VIXL_ASSERT(IsValid());
    VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
    list_ &= ~(UINT64_C(1) << code);
  }

  static CPURegList Union(const CPURegList& list_1, const CPURegList& list_2) {
    VIXL_ASSERT(list_1.type_ == list_2.type_);
    VIXL_ASSERT(list_1.size_ == list_2.size_);
    return CPURegList(list_1.type_, list_1.size_, list_1.list_ | list_2.list_);
  }
  static CPURegList Union(const CPURegList& list_1,
                          const CPURegList& list_2,
                          const CPURegList& list_3);
  static CPURegList Union(const CPURegList& list_1,
                          const CPURegList& list_2,
                          const CPURegList& list_3,
                          const CPURegList& list_4);

  static CPURegList Intersection(const CPURegList& list_1,
                                 const CPURegList& list_2) {
    VIXL_ASSERT(list_1.type_ == list_2.type_);
    VIXL_ASSERT(list_1.size_ == list_2.size_);
    return CPURegList(list_1.type_, list_1.size_, list_1.list_ & list_2.list_);
  }
  static CPURegList Intersection(const CPURegList& list_1,
                                 const CPURegList& list_2,
                                 const CPURegList& list_3);
  static CPURegList Intersection(const CPURegList& list_1,
                                 const CPURegList& list_2,
                                 const CPURegList& list_3,
                                 const CPURegList& list_4);

  bool Overlaps(const CPURegList& other) const {
    return (type_ == other.type_) && ((list_ & other.list_) != 0);
  }

  RegList GetList() const {
    VIXL_ASSERT(IsValid());
    return list_;
  }
  VIXL_DEPRECATED("GetList", RegList list() const) { return GetList(); }

  void SetList(RegList new_list) {
    VIXL_ASSERT(IsValid());
    list_ = new_list;
  }
  VIXL_DEPRECATED("SetList", void set_list(RegList new_list)) {
    return SetList(new_list);
  }

  // Remove all callee-saved registers from the list. This can be useful when
  // preparing registers for an AAPCS64 function call, for example.
  void RemoveCalleeSaved();

  // Find the register in this list that appears in `mask` with the lowest or
  // highest code, remove it from the list and return it as a CPURegister. If
  // the list is empty, leave it unchanged and return NoCPUReg.
  CPURegister PopLowestIndex(RegList mask = ~static_cast<RegList>(0));
  CPURegister PopHighestIndex(RegList mask = ~static_cast<RegList>(0));

  // AAPCS64 callee-saved registers.
  static CPURegList GetCalleeSaved(unsigned size = kXRegSize);
  static CPURegList GetCalleeSavedV(unsigned size = kDRegSize);

  // AAPCS64 caller-saved registers. Note that this includes lr.
  // TODO(all): Determine how we handle d8-d15 being callee-saved, but the top
  // 64-bits being caller-saved.
  static CPURegList GetCallerSaved(unsigned size = kXRegSize);
  static CPURegList GetCallerSavedV(unsigned size = kDRegSize);

  bool IsEmpty() const {
    VIXL_ASSERT(IsValid());
    return list_ == 0;
  }

  bool IncludesAliasOf(const CPURegister& other) const {
    VIXL_ASSERT(IsValid());
    return (GetBank() == other.GetBank()) && IncludesAliasOf(other.GetCode());
  }

  bool IncludesAliasOf(int code) const {
    VIXL_ASSERT(IsValid());
    return (((static_cast<RegList>(1) << code) & list_) != 0);
  }

  int GetCount() const {
    VIXL_ASSERT(IsValid());
    return CountSetBits(list_);
  }
  VIXL_DEPRECATED("GetCount", int Count() const) { return GetCount(); }

  int GetRegisterSizeInBits() const {
    VIXL_ASSERT(IsValid());
    return size_;
  }
  VIXL_DEPRECATED("GetRegisterSizeInBits", int RegisterSizeInBits() const) {
    return GetRegisterSizeInBits();
  }

  int GetRegisterSizeInBytes() const {
    int size_in_bits = GetRegisterSizeInBits();
    VIXL_ASSERT((size_in_bits % 8) == 0);
    return size_in_bits / 8;
  }
  VIXL_DEPRECATED("GetRegisterSizeInBytes", int RegisterSizeInBytes() const) {
    return GetRegisterSizeInBytes();
  }

  unsigned GetTotalSizeInBytes() const {
    VIXL_ASSERT(IsValid());
    return GetRegisterSizeInBytes() * GetCount();
  }
  VIXL_DEPRECATED("GetTotalSizeInBytes", unsigned TotalSizeInBytes() const) {
    return GetTotalSizeInBytes();
  }

 private:
  // If `size` is CPURegister::kUnknownSize and the type requires a known size,
  // then return an arbitrary-but-valid size.
  //
  // Otherwise, the size is checked for validity and returned unchanged.
  static unsigned GetDefaultSizeFor(CPURegister::RegisterType type,
                                    unsigned size) {
    if (size == CPURegister::kUnknownSize) {
      if (type == CPURegister::kRegister) size = kXRegSize;
      if (type == CPURegister::kVRegister) size = kQRegSize;
      // All other types require kUnknownSize.
    }
    VIXL_ASSERT(CPURegister(0, size, type).IsValid());
    return size;
  }

  RegList list_;
  int size_;
  CPURegister::RegisterType type_;

  bool IsValid() const;
};
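
// A small usage sketch (illustrative only):
//
//   CPURegList pool = CPURegList::GetCalleeSaved();  // x19-x29.
//   pool.Remove(x29);            // Keep the frame pointer out of the pool.
//   while (!pool.IsEmpty()) {
//     CPURegister reg = pool.PopLowestIndex();
//     // ... allocate reg ...
//   }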
|
||||
|
||||
|
||||
// AAPCS64 callee-saved registers.
|
||||
extern const CPURegList kCalleeSaved;
|
||||
extern const CPURegList kCalleeSavedV;
|
||||
|
||||
|
||||
// AAPCS64 caller-saved registers. Note that this includes lr.
|
||||
extern const CPURegList kCallerSaved;
|
||||
extern const CPURegList kCallerSavedV;

class IntegerOperand;

// Operand.
class Operand {
 public:
  // #<immediate>
  // where <immediate> is int64_t.
  // This is allowed to be an implicit constructor because Operand is
  // a wrapper class that doesn't normally perform any type conversion.
  Operand(int64_t immediate);  // NOLINT(runtime/explicit)

  Operand(IntegerOperand immediate);  // NOLINT(runtime/explicit)

  // rm, {<shift> #<shift_amount>}
  // where <shift> is one of {LSL, LSR, ASR, ROR}.
  // <shift_amount> is uint6_t.
  // This is allowed to be an implicit constructor because Operand is
  // a wrapper class that doesn't normally perform any type conversion.
  Operand(Register reg,
          Shift shift = LSL,
          unsigned shift_amount = 0);  // NOLINT(runtime/explicit)

  // rm, {<extend> {#<shift_amount>}}
  // where <extend> is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}.
  // <shift_amount> is uint2_t.
  explicit Operand(Register reg, Extend extend, unsigned shift_amount = 0);

  bool IsImmediate() const;
  bool IsPlainRegister() const;
  bool IsShiftedRegister() const;
  bool IsExtendedRegister() const;
  bool IsZero() const;

  // This returns an LSL shift (<= 4) operand as an equivalent extend operand,
  // which helps in the encoding of instructions that use the stack pointer.
  Operand ToExtendedRegister() const;

  int64_t GetImmediate() const {
    VIXL_ASSERT(IsImmediate());
    return immediate_;
  }
  VIXL_DEPRECATED("GetImmediate", int64_t immediate() const) {
    return GetImmediate();
  }

  int64_t GetEquivalentImmediate() const {
    return IsZero() ? 0 : GetImmediate();
  }

  Register GetRegister() const {
    VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister());
    return reg_;
  }
  VIXL_DEPRECATED("GetRegister", Register reg() const) { return GetRegister(); }
  Register GetBaseRegister() const { return GetRegister(); }

  Shift GetShift() const {
    VIXL_ASSERT(IsShiftedRegister());
    return shift_;
  }
  VIXL_DEPRECATED("GetShift", Shift shift() const) { return GetShift(); }

  Extend GetExtend() const {
    VIXL_ASSERT(IsExtendedRegister());
    return extend_;
  }
  VIXL_DEPRECATED("GetExtend", Extend extend() const) { return GetExtend(); }

  unsigned GetShiftAmount() const {
    VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister());
    return shift_amount_;
  }
  VIXL_DEPRECATED("GetShiftAmount", unsigned shift_amount() const) {
    return GetShiftAmount();
  }

 private:
  int64_t immediate_;
  Register reg_;
  Shift shift_;
  Extend extend_;
  unsigned shift_amount_;
};
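
// Illustrative usage sketch (added commentary, not from the original source);
// `x1` and `w2` refer to the usual VIXL register definitions:
//
//   Operand imm(0x1234);            // #0x1234
//   Operand shifted(x1, LSL, 4);    // x1, LSL #4
//   Operand extended(w2, SXTW, 2);  // w2, SXTW #2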


// MemOperand represents the addressing mode of a load or store instruction.
// In assembly syntax, MemOperands are normally denoted by one or more elements
// inside or around square brackets.
class MemOperand {
 public:
  // Creates an invalid `MemOperand`.
  MemOperand();
  explicit MemOperand(Register base,
                      int64_t offset = 0,
                      AddrMode addrmode = Offset);
  MemOperand(Register base,
             Register regoffset,
             Shift shift = LSL,
             unsigned shift_amount = 0);
  MemOperand(Register base,
             Register regoffset,
             Extend extend,
             unsigned shift_amount = 0);
  MemOperand(Register base, const Operand& offset, AddrMode addrmode = Offset);

  const Register& GetBaseRegister() const { return base_; }

  // If the MemOperand has a register offset, return it. (This also applies to
  // pre- and post-index modes.) Otherwise, return NoReg.
  const Register& GetRegisterOffset() const { return regoffset_; }

  // If the MemOperand has an immediate offset, return it. (This also applies
  // to pre- and post-index modes.) Otherwise, return 0.
  int64_t GetOffset() const { return offset_; }

  AddrMode GetAddrMode() const { return addrmode_; }
  Shift GetShift() const { return shift_; }
  Extend GetExtend() const { return extend_; }

  unsigned GetShiftAmount() const {
    // Extend modes can also encode a shift for some instructions.
    VIXL_ASSERT((GetShift() != NO_SHIFT) || (GetExtend() != NO_EXTEND));
    return shift_amount_;
  }

  // True for MemOperands which represent something like [x0].
  // Currently, this will also return true for [x0, #0], because MemOperand has
  // no way to distinguish the two.
  bool IsPlainRegister() const;

  // True for MemOperands which represent something like [x0], or for compound
  // MemOperands which are functionally equivalent, such as [x0, #0], [x0, xzr]
  // or [x0, wzr, UXTW #3].
  bool IsEquivalentToPlainRegister() const;

  // True for immediate-offset (but not indexed) MemOperands.
  bool IsImmediateOffset() const;
  // True for register-offset (but not indexed) MemOperands.
  bool IsRegisterOffset() const;

  bool IsPreIndex() const;
  bool IsPostIndex() const;

  void AddOffset(int64_t offset);

  bool IsValid() const {
    return base_.IsValid() &&
           ((addrmode_ == Offset) || (addrmode_ == PreIndex) ||
            (addrmode_ == PostIndex)) &&
           ((shift_ == NO_SHIFT) || (extend_ == NO_EXTEND)) &&
           ((offset_ == 0) || !regoffset_.IsValid());
  }

  bool Equals(const MemOperand& other) const {
    return base_.Is(other.base_) && regoffset_.Is(other.regoffset_) &&
           (offset_ == other.offset_) && (addrmode_ == other.addrmode_) &&
           (shift_ == other.shift_) && (extend_ == other.extend_) &&
           (shift_amount_ == other.shift_amount_);
  }

 private:
  Register base_;
  Register regoffset_;
  int64_t offset_;
  AddrMode addrmode_;
  Shift shift_;
  Extend extend_;
  unsigned shift_amount_;
};
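
// Illustrative usage sketch (added commentary, not from the original source);
// `x0` and `x1` refer to the usual VIXL register definitions:
//
//   MemOperand plain(x0);                    // [x0]
//   MemOperand imm_offset(x0, 8);            // [x0, #8]
//   MemOperand reg_offset(x0, x1, LSL, 3);   // [x0, x1, LSL #3]
//   MemOperand pre_index(x0, 16, PreIndex);  // [x0, #16]!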

// SVE supports memory operands which don't make sense to the core ISA, such as
// scatter-gather forms, in which either the base or offset registers are
// vectors. This class exists to avoid complicating core-ISA code with
// SVE-specific behaviour.
//
// Note that SVE does not support any pre- or post-index modes.
class SVEMemOperand {
 public:
  // "vector-plus-immediate", like [z0.s, #21]
  explicit SVEMemOperand(ZRegister base, uint64_t offset = 0)
      : base_(base),
        regoffset_(NoReg),
        offset_(RawbitsToInt64(offset)),
        mod_(NO_SVE_OFFSET_MODIFIER),
        shift_amount_(0) {
    VIXL_ASSERT(IsVectorPlusImmediate());
    VIXL_ASSERT(IsValid());
  }

  // "scalar-plus-immediate", like [x0], [x0, #42] or [x0, #42, MUL VL]
  // The only supported modifiers are NO_SVE_OFFSET_MODIFIER or SVE_MUL_VL.
  //
  // Note that VIXL cannot currently distinguish between `SVEMemOperand(x0)`
  // and `SVEMemOperand(x0, 0)`. This is only significant in scalar-plus-scalar
  // instructions where xm defaults to xzr. However, users should not rely on
  // `SVEMemOperand(x0, 0)` being accepted in such cases.
  explicit SVEMemOperand(Register base,
                         uint64_t offset = 0,
                         SVEOffsetModifier mod = NO_SVE_OFFSET_MODIFIER)
      : base_(base),
        regoffset_(NoReg),
        offset_(RawbitsToInt64(offset)),
        mod_(mod),
        shift_amount_(0) {
    VIXL_ASSERT(IsScalarPlusImmediate());
    VIXL_ASSERT(IsValid());
  }

  // "scalar-plus-scalar", like [x0, x1]
  // "scalar-plus-vector", like [x0, z1.d]
  SVEMemOperand(Register base, CPURegister offset)
      : base_(base),
        regoffset_(offset),
        offset_(0),
        mod_(NO_SVE_OFFSET_MODIFIER),
        shift_amount_(0) {
    VIXL_ASSERT(IsScalarPlusScalar() || IsScalarPlusVector());
    if (offset.IsZero()) VIXL_ASSERT(IsEquivalentToScalar());
    VIXL_ASSERT(IsValid());
  }

  // "scalar-plus-vector", like [x0, z1.d, UXTW]
  // The type of `mod` can be any `SVEOffsetModifier` (other than LSL), or a
  // corresponding `Extend` value.
  template <typename M>
  SVEMemOperand(Register base, ZRegister offset, M mod)
      : base_(base),
        regoffset_(offset),
        offset_(0),
        mod_(GetSVEOffsetModifierFor(mod)),
        shift_amount_(0) {
    VIXL_ASSERT(mod_ != SVE_LSL);  // LSL requires an explicit shift amount.
    VIXL_ASSERT(IsScalarPlusVector());
    VIXL_ASSERT(IsValid());
  }

  // "scalar-plus-scalar", like [x0, x1, LSL #1]
  // "scalar-plus-vector", like [x0, z1.d, LSL #2]
  // The type of `mod` can be any `SVEOffsetModifier`, or a corresponding
  // `Shift` or `Extend` value.
  template <typename M>
  SVEMemOperand(Register base, CPURegister offset, M mod, unsigned shift_amount)
      : base_(base),
        regoffset_(offset),
        offset_(0),
        mod_(GetSVEOffsetModifierFor(mod)),
        shift_amount_(shift_amount) {
    VIXL_ASSERT(IsValid());
  }

  // "vector-plus-vector", like [z0.d, z1.d, UXTW]
  template <typename M = SVEOffsetModifier>
  SVEMemOperand(ZRegister base,
                ZRegister offset,
                M mod = NO_SVE_OFFSET_MODIFIER,
                unsigned shift_amount = 0)
      : base_(base),
        regoffset_(offset),
        offset_(0),
        mod_(GetSVEOffsetModifierFor(mod)),
        shift_amount_(shift_amount) {
    VIXL_ASSERT(IsValid());
    VIXL_ASSERT(IsVectorPlusVector());
  }

  // True for SVEMemOperands which represent something like [x0].
  // This will also return true for [x0, #0], because there is no way
  // to distinguish the two.
  bool IsPlainScalar() const {
    return IsScalarPlusImmediate() && (offset_ == 0);
  }

  // True for SVEMemOperands which represent something like [x0], or for
  // compound SVEMemOperands which are functionally equivalent, such as
  // [x0, #0], [x0, xzr] or [x0, wzr, UXTW #3].
  bool IsEquivalentToScalar() const;

  // True for SVEMemOperands like [x0], [x0, #0], false for [x0, xzr] and
  // similar.
  bool IsPlainRegister() const;

  bool IsScalarPlusImmediate() const {
    return base_.IsX() && regoffset_.IsNone() &&
           ((mod_ == NO_SVE_OFFSET_MODIFIER) || IsMulVl());
  }

  bool IsScalarPlusScalar() const {
    // SVE offers no extend modes for scalar-plus-scalar, so both registers
    // must be X registers.
    return base_.IsX() && regoffset_.IsX() &&
           ((mod_ == NO_SVE_OFFSET_MODIFIER) || (mod_ == SVE_LSL));
  }

  bool IsScalarPlusVector() const {
    // The modifier can be LSL or an extend mode (UXTW or SXTW) here. Unlike
    // in the core ISA, these extend modes do not imply an S-sized lane, so the
    // modifier is independent from the lane size. The architecture describes
    // [US]XTW with a D-sized lane as an "unpacked" offset.
    return base_.IsX() && regoffset_.IsZRegister() &&
           (regoffset_.IsLaneSizeS() || regoffset_.IsLaneSizeD()) && !IsMulVl();
  }

  bool IsVectorPlusImmediate() const {
    return base_.IsZRegister() &&
           (base_.IsLaneSizeS() || base_.IsLaneSizeD()) &&
           regoffset_.IsNone() && (mod_ == NO_SVE_OFFSET_MODIFIER);
  }

  bool IsVectorPlusVector() const {
    return base_.IsZRegister() && regoffset_.IsZRegister() && (offset_ == 0) &&
           AreSameFormat(base_, regoffset_) &&
           (base_.IsLaneSizeS() || base_.IsLaneSizeD());
  }

  bool IsContiguous() const { return !IsScatterGather(); }
  bool IsScatterGather() const {
    return base_.IsZRegister() || regoffset_.IsZRegister();
  }

  // TODO: If necessary, add helpers like `HasScalarBase()`.

  Register GetScalarBase() const {
    VIXL_ASSERT(base_.IsX());
    return Register(base_);
  }

  ZRegister GetVectorBase() const {
    VIXL_ASSERT(base_.IsZRegister());
    VIXL_ASSERT(base_.HasLaneSize());
    return ZRegister(base_);
  }

  Register GetScalarOffset() const {
    VIXL_ASSERT(regoffset_.IsRegister());
    return Register(regoffset_);
  }

  ZRegister GetVectorOffset() const {
    VIXL_ASSERT(regoffset_.IsZRegister());
    VIXL_ASSERT(regoffset_.HasLaneSize());
    return ZRegister(regoffset_);
  }

  int64_t GetImmediateOffset() const {
    VIXL_ASSERT(regoffset_.IsNone());
    return offset_;
  }

  SVEOffsetModifier GetOffsetModifier() const { return mod_; }
  unsigned GetShiftAmount() const { return shift_amount_; }

  bool IsEquivalentToLSL(unsigned amount) const {
    if (shift_amount_ != amount) return false;
    if (amount == 0) {
      // No-shift is equivalent to "LSL #0".
      return ((mod_ == SVE_LSL) || (mod_ == NO_SVE_OFFSET_MODIFIER));
    }
    return mod_ == SVE_LSL;
  }

  bool IsMulVl() const { return mod_ == SVE_MUL_VL; }

  bool IsValid() const;

 private:
  // Allow standard `Shift` and `Extend` arguments to be used.
  SVEOffsetModifier GetSVEOffsetModifierFor(Shift shift) {
    if (shift == LSL) return SVE_LSL;
    if (shift == NO_SHIFT) return NO_SVE_OFFSET_MODIFIER;
    // SVE does not accept any other shift.
    VIXL_UNIMPLEMENTED();
    return NO_SVE_OFFSET_MODIFIER;
  }

  SVEOffsetModifier GetSVEOffsetModifierFor(Extend extend = NO_EXTEND) {
    if (extend == UXTW) return SVE_UXTW;
    if (extend == SXTW) return SVE_SXTW;
    if (extend == NO_EXTEND) return NO_SVE_OFFSET_MODIFIER;
    // SVE does not accept any other extend mode.
    VIXL_UNIMPLEMENTED();
    return NO_SVE_OFFSET_MODIFIER;
  }

  SVEOffsetModifier GetSVEOffsetModifierFor(SVEOffsetModifier mod) {
    return mod;
  }

  CPURegister base_;
  CPURegister regoffset_;
  int64_t offset_;
  SVEOffsetModifier mod_;
  unsigned shift_amount_;
};
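
// Illustrative usage sketch (added commentary, not from the original source);
// `x0`, `z0` and `z1` refer to the usual VIXL register definitions:
//
//   SVEMemOperand vec_imm(z0.VnS(), 4);        // [z0.s, #4]
//   SVEMemOperand sc_imm(x0, 42, SVE_MUL_VL);  // [x0, #42, MUL VL]
//   SVEMemOperand sc_vec(x0, z1.VnD(), UXTW);  // [x0, z1.d, UXTW]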

// Represent a signed or unsigned integer operand.
//
// This is designed to make instructions which naturally accept a _signed_
// immediate easier to implement and use, when we also want users to be able to
// specify raw-bits values (such as with hexadecimal constants). The advantage
// of this class over a simple uint64_t (with implicit C++ sign-extension) is
// that this class can strictly check the range of allowed values. With a
// simple uint64_t, it is impossible to distinguish -1 from UINT64_MAX.
//
// For example, these instructions are equivalent:
//
//   __ Insr(z0.VnB(), -1);
//   __ Insr(z0.VnB(), 0xff);
//
// ... as are these:
//
//   __ Insr(z0.VnD(), -1);
//   __ Insr(z0.VnD(), 0xffffffffffffffff);
//
// ... but this is invalid:
//
//   __ Insr(z0.VnB(), 0xffffffffffffffff);  // Too big for B-sized lanes.
class IntegerOperand {
 public:
#define VIXL_INT_TYPES(V) \
  V(char) V(short) V(int) V(long) V(long long)  // NOLINT(runtime/int)
#define VIXL_DECL_INT_OVERLOADS(T) \
  /* These are allowed to be implicit constructors because this is a */ \
  /* wrapper class that doesn't normally perform any type conversion. */ \
  IntegerOperand(signed T immediate) /* NOLINT(runtime/explicit) */ \
      : raw_bits_(immediate), /* Allow implicit sign-extension. */ \
        is_negative_(immediate < 0) {} \
  IntegerOperand(unsigned T immediate) /* NOLINT(runtime/explicit) */ \
      : raw_bits_(immediate), is_negative_(false) {}
  VIXL_INT_TYPES(VIXL_DECL_INT_OVERLOADS)
#undef VIXL_DECL_INT_OVERLOADS
#undef VIXL_INT_TYPES

  // TODO: `Operand` can currently only hold an int64_t, so some large,
  // unsigned values will be misrepresented here.
  explicit IntegerOperand(const Operand& operand)
      : raw_bits_(operand.GetEquivalentImmediate()),
        is_negative_(operand.GetEquivalentImmediate() < 0) {}

  bool IsIntN(unsigned n) const {
    return is_negative_ ? vixl::IsIntN(n, RawbitsToInt64(raw_bits_))
                        : vixl::IsIntN(n, raw_bits_);
  }
  bool IsUintN(unsigned n) const {
    return !is_negative_ && vixl::IsUintN(n, raw_bits_);
  }

  bool IsUint8() const { return IsUintN(8); }
  bool IsUint16() const { return IsUintN(16); }
  bool IsUint32() const { return IsUintN(32); }
  bool IsUint64() const { return IsUintN(64); }

  bool IsInt8() const { return IsIntN(8); }
  bool IsInt16() const { return IsIntN(16); }
  bool IsInt32() const { return IsIntN(32); }
  bool IsInt64() const { return IsIntN(64); }

  bool FitsInBits(unsigned n) const {
    return is_negative_ ? IsIntN(n) : IsUintN(n);
  }
  bool FitsInLane(const CPURegister& zd) const {
    return FitsInBits(zd.GetLaneSizeInBits());
  }
  bool FitsInSignedLane(const CPURegister& zd) const {
    return IsIntN(zd.GetLaneSizeInBits());
  }
  bool FitsInUnsignedLane(const CPURegister& zd) const {
    return IsUintN(zd.GetLaneSizeInBits());
  }

  // Cast a value in the range [INT<n>_MIN, UINT<n>_MAX] to an unsigned integer
  // in the range [0, UINT<n>_MAX] (using two's complement mapping).
  uint64_t AsUintN(unsigned n) const {
    VIXL_ASSERT(FitsInBits(n));
    return raw_bits_ & GetUintMask(n);
  }

  uint8_t AsUint8() const { return static_cast<uint8_t>(AsUintN(8)); }
  uint16_t AsUint16() const { return static_cast<uint16_t>(AsUintN(16)); }
  uint32_t AsUint32() const { return static_cast<uint32_t>(AsUintN(32)); }
  uint64_t AsUint64() const { return AsUintN(64); }

  // Cast a value in the range [INT<n>_MIN, UINT<n>_MAX] to a signed integer in
  // the range [INT<n>_MIN, INT<n>_MAX] (using two's complement mapping).
  int64_t AsIntN(unsigned n) const {
    VIXL_ASSERT(FitsInBits(n));
    return ExtractSignedBitfield64(n - 1, 0, raw_bits_);
  }

  int8_t AsInt8() const { return static_cast<int8_t>(AsIntN(8)); }
  int16_t AsInt16() const { return static_cast<int16_t>(AsIntN(16)); }
  int32_t AsInt32() const { return static_cast<int32_t>(AsIntN(32)); }
  int64_t AsInt64() const { return AsIntN(64); }

  // Several instructions encode a signed int<N>_t, which is then (optionally)
  // left-shifted and sign-extended to a Z register lane with a size which may
  // be larger than N. This helper tries to find an int<N>_t such that the
  // IntegerOperand's arithmetic value is reproduced in each lane.
  //
  // This is the mechanism that allows `Insr(z0.VnB(), 0xff)` to be treated as
  // `Insr(z0.VnB(), -1)`.
  template <unsigned N, unsigned kShift, typename T>
  bool TryEncodeAsShiftedIntNForLane(const CPURegister& zd, T* imm) const {
    VIXL_STATIC_ASSERT(std::numeric_limits<T>::digits > N);
    VIXL_ASSERT(FitsInLane(zd));
    if ((raw_bits_ & GetUintMask(kShift)) != 0) return false;

    // Reverse the specified left-shift.
    IntegerOperand unshifted(*this);
    unshifted.ArithmeticShiftRight(kShift);

    if (unshifted.IsIntN(N)) {
      // This is trivial, since sign-extension produces the same arithmetic
      // value irrespective of the destination size.
      *imm = static_cast<T>(unshifted.AsIntN(N));
      return true;
    }

    // Otherwise, we might be able to use the sign-extension to produce the
    // desired bit pattern. We can only do this for values in the range
    // [INT<N>_MAX + 1, UINT<N>_MAX], where the highest set bit is the sign
    // bit.
    //
    // The lane size has to be adjusted to compensate for `kShift`, since the
    // high bits will be dropped when the encoded value is left-shifted.
    if (unshifted.IsUintN(zd.GetLaneSizeInBits() - kShift)) {
      int64_t encoded = unshifted.AsIntN(zd.GetLaneSizeInBits() - kShift);
      if (vixl::IsIntN(N, encoded)) {
        *imm = static_cast<T>(encoded);
        return true;
      }
    }
    return false;
  }

  // As above, but `kShift` is written to the `*shift` parameter on success, so
  // that it is easy to chain calls like this:
  //
  //   if (imm.TryEncodeAsShiftedIntNForLane<8, 0>(zd, &imm8, &shift) ||
  //       imm.TryEncodeAsShiftedIntNForLane<8, 8>(zd, &imm8, &shift)) {
  //     insn(zd, imm8, shift);
  //   }
  template <unsigned N, unsigned kShift, typename T, typename S>
  bool TryEncodeAsShiftedIntNForLane(const CPURegister& zd,
                                     T* imm,
                                     S* shift) const {
    if (TryEncodeAsShiftedIntNForLane<N, kShift>(zd, imm)) {
      *shift = kShift;
      return true;
    }
    return false;
  }

  // As above, but assume that `kShift` is 0.
  template <unsigned N, typename T>
  bool TryEncodeAsIntNForLane(const CPURegister& zd, T* imm) const {
    return TryEncodeAsShiftedIntNForLane<N, 0>(zd, imm);
  }

  // As above, but for unsigned fields. This is usually a simple operation, but
  // is provided for symmetry.
  template <unsigned N, unsigned kShift, typename T>
  bool TryEncodeAsShiftedUintNForLane(const CPURegister& zd, T* imm) const {
    VIXL_STATIC_ASSERT(std::numeric_limits<T>::digits > N);
    VIXL_ASSERT(FitsInLane(zd));

    // TODO: Should we convert -1 to 0xff here?
    if (is_negative_) return false;
    USE(zd);

    if ((raw_bits_ & GetUintMask(kShift)) != 0) return false;

    if (vixl::IsUintN(N, raw_bits_ >> kShift)) {
      *imm = static_cast<T>(raw_bits_ >> kShift);
      return true;
    }
    return false;
  }

  template <unsigned N, unsigned kShift, typename T, typename S>
  bool TryEncodeAsShiftedUintNForLane(const CPURegister& zd,
                                      T* imm,
                                      S* shift) const {
    if (TryEncodeAsShiftedUintNForLane<N, kShift>(zd, imm)) {
      *shift = kShift;
      return true;
    }
    return false;
  }

  bool IsZero() const { return raw_bits_ == 0; }
  bool IsNegative() const { return is_negative_; }
  bool IsPositiveOrZero() const { return !is_negative_; }

  uint64_t GetMagnitude() const {
    return is_negative_ ? -raw_bits_ : raw_bits_;
  }

 private:
  // Shift the arithmetic value right, with sign extension if is_negative_.
  void ArithmeticShiftRight(int shift) {
    VIXL_ASSERT((shift >= 0) && (shift < 64));
    if (shift == 0) return;
    if (is_negative_) {
      raw_bits_ = ExtractSignedBitfield64(63, shift, raw_bits_);
    } else {
      raw_bits_ >>= shift;
    }
  }

  uint64_t raw_bits_;
  bool is_negative_;
};
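
// Illustrative sketch of the raw-bits behaviour (added commentary, not from
// the original source):
//
//   IntegerOperand imm(-1);
//   imm.IsInt8();       // -> true
//   imm.IsUint8();      // -> false (negative values are never unsigned)
//   imm.AsUint8();      // -> 0xff (two's complement mapping)
//   imm.FitsInBits(8);  // -> true (fits as a signed 8-bit value)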

// This is an abstraction that can represent a register or memory location. The
// `MacroAssembler` provides helpers to move data between generic operands.
class GenericOperand {
 public:
  GenericOperand() { VIXL_ASSERT(!IsValid()); }
  GenericOperand(const CPURegister& reg);  // NOLINT(runtime/explicit)
  GenericOperand(const MemOperand& mem_op,
                 size_t mem_op_size = 0);  // NOLINT(runtime/explicit)

  bool IsValid() const { return cpu_register_.IsValid() != mem_op_.IsValid(); }

  bool Equals(const GenericOperand& other) const;

  bool IsCPURegister() const {
    VIXL_ASSERT(IsValid());
    return cpu_register_.IsValid();
  }

  bool IsRegister() const {
    return IsCPURegister() && cpu_register_.IsRegister();
  }

  bool IsVRegister() const {
    return IsCPURegister() && cpu_register_.IsVRegister();
  }

  bool IsSameCPURegisterType(const GenericOperand& other) {
    return IsCPURegister() && other.IsCPURegister() &&
           GetCPURegister().IsSameType(other.GetCPURegister());
  }

  bool IsMemOperand() const {
    VIXL_ASSERT(IsValid());
    return mem_op_.IsValid();
  }

  CPURegister GetCPURegister() const {
    VIXL_ASSERT(IsCPURegister());
    return cpu_register_;
  }

  MemOperand GetMemOperand() const {
    VIXL_ASSERT(IsMemOperand());
    return mem_op_;
  }

  size_t GetMemOperandSizeInBytes() const {
    VIXL_ASSERT(IsMemOperand());
    return mem_op_size_;
  }

  size_t GetSizeInBytes() const {
    return IsCPURegister() ? cpu_register_.GetSizeInBytes()
                           : GetMemOperandSizeInBytes();
  }

  size_t GetSizeInBits() const { return GetSizeInBytes() * kBitsPerByte; }

 private:
  CPURegister cpu_register_;
  MemOperand mem_op_;
  // The size of the memory region pointed to, in bytes.
  // We only support sizes up to X/D register sizes.
  size_t mem_op_size_;
};
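
// Illustrative usage sketch (added commentary, not from the original source);
// `x0`, `sp` and `kXRegSizeInBytes` refer to the usual VIXL definitions. Note
// that the memory form carries an explicit size:
//
//   GenericOperand in_reg(x0);                 // The X register x0.
//   GenericOperand in_mem(MemOperand(sp, 16),
//                         kXRegSizeInBytes);   // Eight bytes at [sp, #16].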
}  // namespace aarch64
}  // namespace vixl

#endif  // VIXL_AARCH64_OPERANDS_AARCH64_H_