Commit ba914820
Authored 4 years ago by Akash Kothari

    Moved VISC to HPVM in the intrinsics table

Parent: f6227e16
No related branches, tags, or merge requests found.

Showing 1 changed file with 101 additions and 121 deletions:

hpvm/llvm_patches/include/IR/IntrinsicsVISC.td → hpvm/llvm_patches/include/IR/IntrinsicsApproxHPVM.td (+101 −121)
-//===- IntrinsicsVISC.td - Defines VISC intrinsics ---------*- tablegen -*-===//
+//===- IntrinsicsHPVM.td - Defines HPVM intrinsics ---------*- tablegen -*-===//
 //
 // The LLVM Compiler Infrastructure
 //
 ...
@@ -7,268 +7,250 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// This file defines all of the VISC-specific intrinsics.
+// This file defines all of the HPVM-specific intrinsics.
 //
 //===----------------------------------------------------------------------===//

-let TargetPrefix = "visc" in {
-  /* All intrinsics start with "llvm.visc."
+let TargetPrefix = "hpvm" in {
+  /* All intrinsics start with "llvm.hpvm."
    * As we do not want the compiler to mess with these intrinsics, we assume
    * worst memory behavior for all these intrinsics.
    */

   /* Initialization intrinsic -
-   * i8* llvm.visc.setup(function*);
+   * i8* llvm.hpvm.setup(function*);
    */
-  def int_visc_init : Intrinsic<[], [], []>;
+  def int_hpvm_init : Intrinsic<[], [], []>;

   /* Launch intrinsic - with streaming argument
-   * i8* llvm.visc.launch(i8*, ArgList*, i1);
+   * i8* llvm.hpvm.launch(i8*, ArgList*, i1);
    */
-  def int_visc_launch : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty,
+  def int_hpvm_launch : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty,
                                    llvm_ptr_ty, llvm_i1_ty], []>;

   /* Push intrinsic - push data on streaming pipeline
-   * void llvm.visc.push(i8*, ArgList*);
+   * void llvm.hpvm.push(i8*, ArgList*);
    */
-  def int_visc_push : Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty], []>;
+  def int_hpvm_push : Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty], []>;

   /* Pop intrinsic - pop data from streaming pipeline
-   * i8* llvm.visc.pop(i8*);
+   * i8* llvm.hpvm.pop(i8*);
    */
-  def int_visc_pop : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], []>;
+  def int_hpvm_pop : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], []>;

   /* Cleanup intrinsic -
-   * void llvm.visc.cleanup(i8*);
+   * void llvm.hpvm.cleanup(i8*);
    */
-  def int_visc_cleanup : Intrinsic<[], [], []>;
+  def int_hpvm_cleanup : Intrinsic<[], [], []>;

   /* Wait intrinsic -
-   * void llvm.visc.wait(graphID*);
+   * void llvm.hpvm.wait(graphID*);
    */
-  def int_visc_wait : Intrinsic<[], [llvm_ptr_ty], []>;
+  def int_hpvm_wait : Intrinsic<[], [llvm_ptr_ty], []>;

   /* Track memory intrinsic -
-   * void llvm.visc.trackMemory(i8*, i64);
+   * void llvm.hpvm.trackMemory(i8*, i64);
    */
-  def int_visc_trackMemory : Intrinsic<[], [llvm_ptr_ty, llvm_i64_ty], []>;
+  def int_hpvm_trackMemory : Intrinsic<[], [llvm_ptr_ty, llvm_i64_ty], []>;

   /* Track memory intrinsic -
-   * void llvm.visc.untrackMemory(i8*);
+   * void llvm.hpvm.untrackMemory(i8*);
    */
-  def int_visc_untrackMemory : Intrinsic<[], [llvm_ptr_ty], []>;
+  def int_hpvm_untrackMemory : Intrinsic<[], [llvm_ptr_ty], []>;

   /* Request memory intrinsic -
-   * void llvm.visc.requestMemory(i8*, i64);
+   * void llvm.hpvm.requestMemory(i8*, i64);
    */
-  def int_visc_requestMemory : Intrinsic<[], [llvm_ptr_ty, llvm_i64_ty], []>;
+  def int_hpvm_requestMemory : Intrinsic<[], [llvm_ptr_ty, llvm_i64_ty], []>;

   /* Create Node intrinsic -
-   * i8* llvm.visc.createNode(function*);
+   * i8* llvm.hpvm.createNode(function*);
    */
-  def int_visc_createNode : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], []>;
+  def int_hpvm_createNode : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], []>;

   /* Create Node 1D array intrinsic -
-   * i8* llvm.visc.createNode1D(function*, i64);
+   * i8* llvm.hpvm.createNode1D(function*, i64);
    */
-  def int_visc_createNode1D : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty,
+  def int_hpvm_createNode1D : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty,
                                          llvm_i64_ty], []>;

   /* Create Node 2D array intrinsic -
-   * i8* llvm.visc.createNode2D(function*, i64, i64);
+   * i8* llvm.hpvm.createNode2D(function*, i64, i64);
    */
-  def int_visc_createNode2D : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty,
+  def int_hpvm_createNode2D : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty,
                                          llvm_i64_ty, llvm_i64_ty], []>;

   /* Create Node 3D array intrinsic -
-   * i8* llvm.visc.createNode2D(function*, i64, i64, i64);
+   * i8* llvm.hpvm.createNode2D(function*, i64, i64, i64);
    */
-  def int_visc_createNode3D : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty,
+  def int_hpvm_createNode3D : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty,
                                          llvm_i64_ty, llvm_i64_ty, llvm_i64_ty],
                                         []>;

   /* Create dataflow edge intrinsic -
-   * i8* llvm.visc.createEdge(i8*, i8*, i1, i32, i32, i1);
+   * i8* llvm.hpvm.createEdge(i8*, i8*, i1, i32, i32, i1);
    */
-  def int_visc_createEdge : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_ptr_ty,
+  def int_hpvm_createEdge : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_ptr_ty,
                                        llvm_i1_ty, llvm_i32_ty, llvm_i32_ty,
                                        llvm_i1_ty],
                                       []>;

   /* Create bind input intrinsic -
-   * void llvm.visc.bind.input(i8*, i32, i32);
+   * void llvm.hpvm.bind.input(i8*, i32, i32);
    */
-  def int_visc_bind_input : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty,
+  def int_hpvm_bind_input : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty,
                                        llvm_i32_ty, llvm_i1_ty], []>;

   /* Create bind output intrinsic -
-   * void llvm.visc.bind.output(i8*, i32, i32);
+   * void llvm.hpvm.bind.output(i8*, i32, i32);
    */
-  def int_visc_bind_output : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty,
+  def int_hpvm_bind_output : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty,
                                         llvm_i32_ty, llvm_i1_ty], []>;

   /* Find associated dataflow node intrinsic -
-   * i8* llvm.visc.getNode();
+   * i8* llvm.hpvm.getNode();
    */
-  def int_visc_getNode : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
+  def int_hpvm_getNode : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;

   /* Find parent dataflow node intrinsic -
-   * i8* llvm.visc.getParentNode(i8*);
+   * i8* llvm.hpvm.getParentNode(i8*);
    */
-  def int_visc_getParentNode : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], [IntrNoMem]>;
+  def int_hpvm_getParentNode : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], [IntrNoMem]>;

   /* Find the number of dimensions of a dataflow node intrinsic -
-   * i32 llvm.visc.getNumDims(i8*);
+   * i32 llvm.hpvm.getNumDims(i8*);
    */
-  def int_visc_getNumDims : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty], [IntrNoMem]>;
+  def int_hpvm_getNumDims : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty], [IntrNoMem]>;

   /* Find the unique indentifier of a dataflow node (with respect to his parent
    * node) in the specified dimension intrinsic -
    */
-  /* i64 llvm.visc.getNodeInstanceID.[xyz](i8*);
+  /* i64 llvm.hpvm.getNodeInstanceID.[xyz](i8*);
    */
-  def int_visc_getNodeInstanceID_x : Intrinsic<[llvm_i64_ty], [llvm_ptr_ty],
+  def int_hpvm_getNodeInstanceID_x : Intrinsic<[llvm_i64_ty], [llvm_ptr_ty],
                                                [IntrNoMem]>;
-  def int_visc_getNodeInstanceID_y : Intrinsic<[llvm_i64_ty], [llvm_ptr_ty],
+  def int_hpvm_getNodeInstanceID_y : Intrinsic<[llvm_i64_ty], [llvm_ptr_ty],
                                                [IntrNoMem]>;
-  def int_visc_getNodeInstanceID_z : Intrinsic<[llvm_i64_ty], [llvm_ptr_ty],
+  def int_hpvm_getNodeInstanceID_z : Intrinsic<[llvm_i64_ty], [llvm_ptr_ty],
                                                [IntrNoMem]>;

   /* Find the number of instances of a dataflow node in the specified dimension
    * intrinsic -
    */
-  /* i64 llvm.visc.getNumNodeInstances.[xyz](i8*);
+  /* i64 llvm.hpvm.getNumNodeInstances.[xyz](i8*);
    */
-  def int_visc_getNumNodeInstances_x : Intrinsic<[llvm_i64_ty], [llvm_ptr_ty],
+  def int_hpvm_getNumNodeInstances_x : Intrinsic<[llvm_i64_ty], [llvm_ptr_ty],
                                                  [IntrNoMem]>;
-  def int_visc_getNumNodeInstances_y : Intrinsic<[llvm_i64_ty], [llvm_ptr_ty],
+  def int_hpvm_getNumNodeInstances_y : Intrinsic<[llvm_i64_ty], [llvm_ptr_ty],
                                                  [IntrNoMem]>;
-  def int_visc_getNumNodeInstances_z : Intrinsic<[llvm_i64_ty], [llvm_ptr_ty],
+  def int_hpvm_getNumNodeInstances_z : Intrinsic<[llvm_i64_ty], [llvm_ptr_ty],
                                                  [IntrNoMem]>;

   /* Local Barrier
-   * void llvm.visc.barrier();
+   * void llvm.hpvm.barrier();
    */
-  def int_visc_barrier : Intrinsic<[], [], []>;
+  def int_hpvm_barrier : Intrinsic<[], [], []>;

   /* Memory allocation inside the graph
-   * i8* llvm.visc.malloc();
+   * i8* llvm.hpvm.malloc();
    */
-  def int_visc_malloc : Intrinsic<[llvm_ptr_ty], [llvm_i64_ty], []>;
+  def int_hpvm_malloc : Intrinsic<[llvm_ptr_ty], [llvm_i64_ty], []>;

   /* Find the vector length supported by target architecture
    * intrinsic -
-   * i32 llvm.visc.getVectorLength();
+   * i32 llvm.hpvm.getVectorLength();
    */
-  def int_visc_getVectorLength : Intrinsic<[llvm_i32_ty], [], []>;
+  def int_hpvm_getVectorLength : Intrinsic<[llvm_i32_ty], [], []>;

   /* ============ Atomic intrinsics ============= */
   // Atomic arithmetic operations

-  /* i32 llvm.visc.atomic.cmpxchg(i32*, i32)*/
-  def int_visc_atomic_cmpxchg: Intrinsic<[llvm_i32_ty], [llvm_ptr_ty,
-                                          llvm_i32_ty], []>;

-  /* i32 llvm.visc.atomic.add(i32*, i32)*/
-  def int_visc_atomic_add: Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
+  /* i32 llvm.hpvm.atomic.add(i32*, i32)*/
+  def int_hpvm_atomic_add: Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
                                      []>;

-  /* i32 llvm.visc.atomic.sub(i32*, i32)*/
-  def int_visc_atomic_sub: Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
+  /* i32 llvm.hpvm.atomic.sub(i32*, i32)*/
+  def int_hpvm_atomic_sub: Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
                                      []>;

-  /* i32 llvm.visc.atomic.xchg(i32*, i32)*/
-  def int_visc_atomic_xchg: Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
+  /* i32 llvm.hpvm.atomic.xchg(i32*, i32)*/
+  def int_hpvm_atomic_xchg: Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
                                       []>;

-  /* i32 llvm.visc.atomic.inc(i32*, i32)*/
-  def int_visc_atomic_inc: Intrinsic<[llvm_i32_ty], [llvm_ptr_ty],
+  /* i32 llvm.hpvm.atomic.min(i32*, i32)*/
+  def int_hpvm_atomic_min: Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
                                      []>;

-  /* i32 llvm.visc.atomic.dec(i32*, i32)*/
-  def int_visc_atomic_dec: Intrinsic<[llvm_i32_ty], [llvm_ptr_ty],
+  /* i32 llvm.hpvm.atomic.max(i32*, i32)*/
+  def int_hpvm_atomic_max: Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
                                      []>;

-  /* i32 llvm.visc.atomic.min(i32*, i32)*/
-  def int_visc_atomic_min: Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
-                                     []>;

   // Atomic bitwise operations
-  /* i32 llvm.visc.atomic.umin(i32*, i32)*/
-  def int_visc_atomic_umin: Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
+  /* i32 llvm.hpvm.atomic.and(i32*, i32)*/
+  def int_hpvm_atomic_and: Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
                                      []>;

-  /* i32 llvm.visc.atomic.max(i32*, i32)*/
-  def int_visc_atomic_max: Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
+  /* i32 llvm.hpvm.atomic.or(i32*, i32)*/
+  def int_hpvm_atomic_or: Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
                                     []>;

-  /* i32 llvm.visc.atomic.umax(i32*, i32)*/
-  def int_visc_atomic_umax: Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
+  /* i32 llvm.hpvm.atomic.xor(i32*, i32)*/
+  def int_hpvm_atomic_xor: Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
                                      []>;

-  // Atomic bitwise operations
-  /* i32 llvm.visc.atomic.and(i32*, i32)*/
-  def int_visc_atomic_and: Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
-                                     []>;
-  /* i32 llvm.visc.atomic.or(i32*, i32)*/
-  def int_visc_atomic_or: Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
-                                    []>;
-  /* i32 llvm.visc.atomic.xor(i32*, i32)*/
-  def int_visc_atomic_xor: Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
-                                     []>;

   /***************************************************************************/
   /***************************************************************************/
   /* ApproxHPVM intrinsics */
   /***************************************************************************/

   /* Tensor add intrinsic
-   * i8* llvm.visc.tensor.add(i8*, i8*);
+   * i8* llvm.hpvm.tensor.add(i8*, i8*);
    */
-  def int_visc_tensor_add : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty,
+  def int_hpvm_tensor_add : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty,
                                        llvm_ptr_ty], []>;

   /* Tensor mul intrinsic
-   * i8* llvm.visc.tensor.mul(i8*, i8*);
+   * i8* llvm.hpvm.tensor.mul(i8*, i8*);
    */
-  def int_visc_tensor_mul : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty,
+  def int_hpvm_tensor_mul : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty,
                                        llvm_ptr_ty], []>;

   /* Tensor relu intrinsic
-   * i8* llvm.visc.tensor.relu(i8*);
+   * i8* llvm.hpvm.tensor.relu(i8*);
    */
-  def int_visc_tensor_relu : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], []>;
+  def int_hpvm_tensor_relu : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], []>;

   /* Tensor clipped relu intrinsic
-   * i8* llvm.visc.tensor.clipped.relu(i8*);
+   * i8* llvm.hpvm.tensor.clipped.relu(i8*);
    */
-  def int_visc_tensor_clipped_relu : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], []>;
+  def int_hpvm_tensor_clipped_relu : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], []>;

   /* Tensor tanh intrinsic
-   * i8* llvm.visc.tensor.tanh(i8*);
+   * i8* llvm.hpvm.tensor.tanh(i8*);
    */
-  def int_visc_tensor_tanh : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], []>;
+  def int_hpvm_tensor_tanh : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], []>;

   /* Tensor sigmoid intrinsic
-   * i8* llvm.visc.tensor.sigmoid(i8*);
+   * i8* llvm.hpvm.tensor.sigmoid(i8*);
    */
-  def int_visc_tensor_sigmoid : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], []>;
+  def int_hpvm_tensor_sigmoid : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], []>;

   /* Tensor softmax intrinsic
-   * i8* llvm.visc.tensor.softmax(i8*);
+   * i8* llvm.hpvm.tensor.softmax(i8*);
    */
-  def int_visc_tensor_softmax : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], []>;
+  def int_hpvm_tensor_softmax : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], []>;

   /* Tensor convolution intrinsic
-   * i8* llvm.visc.tensor.convolution(i8*, i8*, i32, i32, i32, i32);
+   * i8* llvm.hpvm.tensor.convolution(i8*, i8*, i32, i32, i32, i32);
    */
-  def int_visc_tensor_convolution : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty,
+  def int_hpvm_tensor_convolution : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty,
                                                llvm_ptr_ty,
                                                llvm_i32_ty,
                                                llvm_i32_ty,
 ...
@@ -276,9 +258,9 @@ let TargetPrefix = "visc" in {
                                                llvm_i32_ty], []>;

   /* Tensor group convolution intrinsic
-   * i8* llvm.visc.tensor.group.convolution(i8*, i8*, i32, i32, i32, i32, i32, i32);
+   * i8* llvm.hpvm.tensor.group.convolution(i8*, i8*, i32, i32, i32, i32, i32, i32);
    */
-  def int_visc_tensor_group_convolution : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty,
+  def int_hpvm_tensor_group_convolution : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty,
                                                      llvm_ptr_ty,
                                                      llvm_i32_ty,
                                                      llvm_i32_ty,
 ...
@@ -288,9 +270,9 @@ let TargetPrefix = "visc" in {
                                                      llvm_i32_ty], []>;

   /* Tensor BatchNorm intrinsic
-   * i8* llvm.visc.tensor.batchnorm(i8*, i8*, i8*, i8*, i8*, double);
+   * i8* llvm.hpvm.tensor.batchnorm(i8*, i8*, i8*, i8*, i8*, double);
    */
-  def int_visc_tensor_batchnorm : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty,
+  def int_hpvm_tensor_batchnorm : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty,
                                              llvm_ptr_ty,
                                              llvm_ptr_ty,
                                              llvm_ptr_ty,
 ...
@@ -299,25 +281,25 @@ let TargetPrefix = "visc" in {
   /* Tensor pool intrinsics: max, min, average
-   * i8* llvm.visc.tensor.pool.max(i8*, i32, i32, i32, i32, i32, i32);
-   * i8* llvm.visc.tensor.pool.min(i8*, i32, i32, i32, i32, i32, i32);
-   * i8* llvm.visc.tensor.pool.average(i8*, i32, i32, i32, i32, i32, i32);
+   * i8* llvm.hpvm.tensor.pool.max(i8*, i32, i32, i32, i32, i32, i32);
+   * i8* llvm.hpvm.tensor.pool.min(i8*, i32, i32, i32, i32, i32, i32);
+   * i8* llvm.hpvm.tensor.pool.average(i8*, i32, i32, i32, i32, i32, i32);
    */
-  def int_visc_tensor_pool_max : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty,
+  def int_hpvm_tensor_pool_max : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty,
                                             llvm_i32_ty,
                                             llvm_i32_ty,
                                             llvm_i32_ty,
                                             llvm_i32_ty,
                                             llvm_i32_ty,
                                             llvm_i32_ty], []>;
-  def int_visc_tensor_pool_min : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty,
+  def int_hpvm_tensor_pool_min : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty,
                                             llvm_i32_ty,
                                             llvm_i32_ty,
                                             llvm_i32_ty,
                                             llvm_i32_ty,
                                             llvm_i32_ty,
                                             llvm_i32_ty], []>;
-  def int_visc_tensor_pool_mean : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty,
+  def int_hpvm_tensor_pool_mean : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty,
                                              llvm_i32_ty,
                                              llvm_i32_ty,
                                              llvm_i32_ty,
 ...
@@ -325,7 +307,5 @@ let TargetPrefix = "visc" in {
                                              llvm_i32_ty,
                                              llvm_i32_ty], []>;

-  def int_visc_node_id : Intrinsic<[llvm_ptr_ty], [llvm_i32_ty], []>;
+  def int_hpvm_node_id : Intrinsic<[llvm_ptr_ty], [llvm_i32_ty], []>;
 }
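
For orientation, here is a minimal sketch of what the prefix change means at an LLVM IR call site, using the createNode1D signature declared in the table above (i8* (i8*, i64)). This is not part of the commit: it assumes an LLVM build with these HPVM patches applied, and the function @build_graph and its value names are hypothetical, used only for illustration.

; Old spelling, removed by this commit:
;   %graph = call i8* @llvm.visc.createNode1D(i8* %fn, i64 %n)

; New spelling, same signature, matching int_hpvm_createNode1D above:
declare i8* @llvm.hpvm.createNode1D(i8*, i64)

define i8* @build_graph(i8* %fn, i64 %n) {
entry:
  ; %fn is a node function cast to i8*; %n is the 1-D replication factor.
  %graph = call i8* @llvm.hpvm.createNode1D(i8* %fn, i64 %n)
  ret i8* %graph
}

Since only the hpvm-prefixed intrinsics remain in the table after this commit, any IR or passes that still emit llvm.visc.* names would need the same rename.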