From 94fc868d63fde18977f9c5f744835d4a967811c3 Mon Sep 17 00:00:00 2001
From: akashk4 <akashk4@illinois.edu>
Date: Tue, 7 Jan 2020 07:45:29 -0600
Subject: [PATCH] Include path to intrinsic table for HPVM

---
 hpvm/llvm_patches/include/IR/Intrinsics.td | 676 ++++++++++++++++++---
 1 file changed, 582 insertions(+), 94 deletions(-)

diff --git a/hpvm/llvm_patches/include/IR/Intrinsics.td b/hpvm/llvm_patches/include/IR/Intrinsics.td
index eed22c81b9..2f79964a2e 100644
--- a/hpvm/llvm_patches/include/IR/Intrinsics.td
+++ b/hpvm/llvm_patches/include/IR/Intrinsics.td
@@ -1,9 +1,8 @@
 //===- Intrinsics.td - Defines all LLVM intrinsics ---------*- tablegen -*-===//
 //
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 //
 //===----------------------------------------------------------------------===//
 //
@@ -12,6 +11,7 @@
 //===----------------------------------------------------------------------===//
 
 include "llvm/CodeGen/ValueTypes.td"
+include "llvm/CodeGen/SDNodeProperties.td"
 
 //===----------------------------------------------------------------------===//
 // Properties we keep track of for intrinsics.
@@ -69,6 +69,11 @@ class Returned<int argNo> : IntrinsicProperty {
   int ArgNo = argNo;
 }
 
+// ImmArg - The specified argument must be an immediate.
+class ImmArg<int argNo> : IntrinsicProperty {
+  int ArgNo = argNo;
+}
+
 // ReadOnly - The specified argument pointer is not written to through the
 // pointer by the intrinsic.
 class ReadOnly<int argNo> : IntrinsicProperty {
@@ -89,6 +94,12 @@ class ReadNone<int argNo> : IntrinsicProperty {
 
 def IntrNoReturn : IntrinsicProperty;
 
+def IntrWillReturn : IntrinsicProperty;
+
+// IntrCold - Calls to this intrinsic are cold.
+// Parallels the cold attribute on LLVM IR functions.
+def IntrCold : IntrinsicProperty;
+
 // IntrNoduplicate - Calls to this intrinsic cannot be duplicated.
 // Parallels the noduplicate attribute on LLVM IR functions.
 def IntrNoDuplicate : IntrinsicProperty;
@@ -98,12 +109,25 @@ def IntrNoDuplicate : IntrinsicProperty;
 // Parallels the convergent attribute on LLVM IR functions.
 def IntrConvergent : IntrinsicProperty;
 
+// This property indicates that the intrinsic is safe to speculate.
+def IntrSpeculatable : IntrinsicProperty;
+
+// This property can be used to override the 'has no other side effects'
+// language of the IntrNoMem, IntrReadMem, IntrWriteMem, and IntrArgMemOnly
+// intrinsic properties. By default, intrinsics are assumed to have side
+// effects, so this property is only necessary if you have defined one of
+// the memory properties listed above.
+// For this property, 'side effects' has the same meaning as 'side effects'
+// defined by the hasSideEffects property of the TableGen Instruction class.
+def IntrHasSideEffects : IntrinsicProperty;
+
 //===----------------------------------------------------------------------===//
 // Types used by intrinsics.
//===----------------------------------------------------------------------===// class LLVMType<ValueType vt> { ValueType VT = vt; + int isAny = 0; } class LLVMQualPointerType<LLVMType elty, int addrspace> @@ -118,6 +142,8 @@ class LLVMPointerType<LLVMType elty> class LLVMAnyPointerType<LLVMType elty> : LLVMType<iPTRAny>{ LLVMType ElTy = elty; + + let isAny = 1; } // Match the type of another intrinsic parameter. Number is an index into the @@ -137,23 +163,31 @@ class LLVMMatchType<int num> // the intrinsic is overloaded, so the matched type should be declared as iAny. class LLVMExtendedType<int num> : LLVMMatchType<num>; class LLVMTruncatedType<int num> : LLVMMatchType<num>; -class LLVMVectorSameWidth<int num, LLVMType elty> - : LLVMMatchType<num> { + +// Match the scalar/vector of another intrinsic parameter but with a different +// element type. Either both are scalars or both are vectors with the same +// number of elements. +class LLVMScalarOrSameVectorWidth<int idx, LLVMType elty> + : LLVMMatchType<idx> { ValueType ElTy = elty.VT; } + class LLVMPointerTo<int num> : LLVMMatchType<num>; class LLVMPointerToElt<int num> : LLVMMatchType<num>; -class LLVMVectorOfPointersToElt<int num> : LLVMMatchType<num>; +class LLVMVectorOfAnyPointersToElt<int num> : LLVMMatchType<num>; +class LLVMVectorElementType<int num> : LLVMMatchType<num>; // Match the type of another intrinsic parameter that is expected to be a // vector type, but change the element count to be half as many class LLVMHalfElementsVectorType<int num> : LLVMMatchType<num>; def llvm_void_ty : LLVMType<isVoid>; -def llvm_any_ty : LLVMType<Any>; -def llvm_anyint_ty : LLVMType<iAny>; -def llvm_anyfloat_ty : LLVMType<fAny>; -def llvm_anyvector_ty : LLVMType<vAny>; +let isAny = 1 in { + def llvm_any_ty : LLVMType<Any>; + def llvm_anyint_ty : LLVMType<iAny>; + def llvm_anyfloat_ty : LLVMType<fAny>; + def llvm_anyvector_ty : LLVMType<vAny>; +} def llvm_i1_ty : LLVMType<i1>; def llvm_i8_ty : LLVMType<i8>; def llvm_i16_ty : LLVMType<i16>; @@ -229,6 +263,7 @@ def llvm_v2f32_ty : LLVMType<v2f32>; // 2 x float def llvm_v4f32_ty : LLVMType<v4f32>; // 4 x float def llvm_v8f32_ty : LLVMType<v8f32>; // 8 x float def llvm_v16f32_ty : LLVMType<v16f32>; // 16 x float +def llvm_v32f32_ty : LLVMType<v32f32>; // 32 x float def llvm_v1f64_ty : LLVMType<v1f64>; // 1 x double def llvm_v2f64_ty : LLVMType<v2f64>; // 2 x double def llvm_v4f64_ty : LLVMType<v4f64>; // 4 x double @@ -236,7 +271,6 @@ def llvm_v8f64_ty : LLVMType<v8f64>; // 8 x double def llvm_vararg_ty : LLVMType<isVoid>; // this means vararg here - //===----------------------------------------------------------------------===// // Intrinsic Definitions. //===----------------------------------------------------------------------===// @@ -252,16 +286,17 @@ def llvm_vararg_ty : LLVMType<isVoid>; // this means vararg here // intrinsic. // * Properties can be set to describe the behavior of the intrinsic. // -class SDPatternOperator; class Intrinsic<list<LLVMType> ret_types, list<LLVMType> param_types = [], - list<IntrinsicProperty> properties = [], - string name = ""> : SDPatternOperator { + list<IntrinsicProperty> intr_properties = [], + string name = "", + list<SDNodeProperty> sd_properties = []> : SDPatternOperator { string LLVMName = name; string TargetPrefix = ""; // Set to a prefix for target-specific intrinsics. 
list<LLVMType> RetTypes = ret_types; list<LLVMType> ParamTypes = param_types; - list<IntrinsicProperty> IntrProperties = properties; + list<IntrinsicProperty> IntrProperties = intr_properties; + let Properties = sd_properties; bit isTarget = 0; } @@ -297,11 +332,84 @@ def int_gcwrite : Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty, llvm_ptrptr_ty], [IntrArgMemOnly, NoCapture<1>, NoCapture<2>]>; +//===------------------- ObjC ARC runtime Intrinsics --------------------===// +// +// Note these are to support the Objective-C ARC optimizer which wants to +// eliminate retain and releases where possible. + +def int_objc_autorelease : Intrinsic<[llvm_ptr_ty], + [llvm_ptr_ty]>; +def int_objc_autoreleasePoolPop : Intrinsic<[], [llvm_ptr_ty]>; +def int_objc_autoreleasePoolPush : Intrinsic<[llvm_ptr_ty], []>; +def int_objc_autoreleaseReturnValue : Intrinsic<[llvm_ptr_ty], + [llvm_ptr_ty]>; +def int_objc_copyWeak : Intrinsic<[], + [llvm_ptrptr_ty, + llvm_ptrptr_ty]>; +def int_objc_destroyWeak : Intrinsic<[], [llvm_ptrptr_ty]>; +def int_objc_initWeak : Intrinsic<[llvm_ptr_ty], + [llvm_ptrptr_ty, + llvm_ptr_ty]>; +def int_objc_loadWeak : Intrinsic<[llvm_ptr_ty], + [llvm_ptrptr_ty]>; +def int_objc_loadWeakRetained : Intrinsic<[llvm_ptr_ty], + [llvm_ptrptr_ty]>; +def int_objc_moveWeak : Intrinsic<[], + [llvm_ptrptr_ty, + llvm_ptrptr_ty]>; +def int_objc_release : Intrinsic<[], [llvm_ptr_ty]>; +def int_objc_retain : Intrinsic<[llvm_ptr_ty], + [llvm_ptr_ty]>; +def int_objc_retainAutorelease : Intrinsic<[llvm_ptr_ty], + [llvm_ptr_ty]>; +def int_objc_retainAutoreleaseReturnValue : Intrinsic<[llvm_ptr_ty], + [llvm_ptr_ty]>; +def int_objc_retainAutoreleasedReturnValue : Intrinsic<[llvm_ptr_ty], + [llvm_ptr_ty]>; +def int_objc_retainBlock : Intrinsic<[llvm_ptr_ty], + [llvm_ptr_ty]>; +def int_objc_storeStrong : Intrinsic<[], + [llvm_ptrptr_ty, + llvm_ptr_ty]>; +def int_objc_storeWeak : Intrinsic<[llvm_ptr_ty], + [llvm_ptrptr_ty, + llvm_ptr_ty]>; +def int_objc_clang_arc_use : Intrinsic<[], + [llvm_vararg_ty]>; +def int_objc_unsafeClaimAutoreleasedReturnValue : Intrinsic<[llvm_ptr_ty], + [llvm_ptr_ty]>; +def int_objc_retainedObject : Intrinsic<[llvm_ptr_ty], + [llvm_ptr_ty]>; +def int_objc_unretainedObject : Intrinsic<[llvm_ptr_ty], + [llvm_ptr_ty]>; +def int_objc_unretainedPointer : Intrinsic<[llvm_ptr_ty], + [llvm_ptr_ty]>; +def int_objc_retain_autorelease : Intrinsic<[llvm_ptr_ty], + [llvm_ptr_ty]>; +def int_objc_sync_enter : Intrinsic<[llvm_i32_ty], + [llvm_ptr_ty]>; +def int_objc_sync_exit : Intrinsic<[llvm_i32_ty], + [llvm_ptr_ty]>; +def int_objc_arc_annotation_topdown_bbstart : Intrinsic<[], + [llvm_ptrptr_ty, + llvm_ptrptr_ty]>; +def int_objc_arc_annotation_topdown_bbend : Intrinsic<[], + [llvm_ptrptr_ty, + llvm_ptrptr_ty]>; +def int_objc_arc_annotation_bottomup_bbstart : Intrinsic<[], + [llvm_ptrptr_ty, + llvm_ptrptr_ty]>; +def int_objc_arc_annotation_bottomup_bbend : Intrinsic<[], + [llvm_ptrptr_ty, + llvm_ptrptr_ty]>; + + //===--------------------- Code Generator Intrinsics ----------------------===// // -def int_returnaddress : Intrinsic<[llvm_ptr_ty], [llvm_i32_ty], [IntrNoMem]>; +def int_returnaddress : Intrinsic<[llvm_ptr_ty], [llvm_i32_ty], [IntrNoMem, ImmArg<0>]>; def int_addressofreturnaddress : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>; -def int_frameaddress : Intrinsic<[llvm_ptr_ty], [llvm_i32_ty], [IntrNoMem]>; +def int_frameaddress : Intrinsic<[llvm_ptr_ty], [llvm_i32_ty], [IntrNoMem, ImmArg<0>]>; +def int_sponentry : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>; def int_read_register : 
Intrinsic<[llvm_anyint_ty], [llvm_metadata_ty], [IntrReadMem], "llvm.read_register">; def int_write_register : Intrinsic<[], [llvm_metadata_ty, llvm_anyint_ty], @@ -318,7 +426,14 @@ def int_localescape : Intrinsic<[], [llvm_vararg_ty]>; // to an escaped allocation indicated by the index. def int_localrecover : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_ptr_ty, llvm_i32_ty], + [IntrNoMem, ImmArg<2>]>; + +// Given the frame pointer passed into an SEH filter function, returns a +// pointer to the local variable area suitable for use with llvm.localrecover. +def int_eh_recoverfp : Intrinsic<[llvm_ptr_ty], + [llvm_ptr_ty, llvm_ptr_ty], [IntrNoMem]>; + // Note: we treat stacksave/stackrestore as writemem because we don't otherwise // model their dependencies on allocas. def int_stacksave : Intrinsic<[llvm_ptr_ty]>, @@ -331,13 +446,14 @@ def int_get_dynamic_area_offset : Intrinsic<[llvm_anyint_ty]>; def int_thread_pointer : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>, GCCBuiltin<"__builtin_thread_pointer">; -// IntrArgMemOnly is more pessimistic than strictly necessary for prefetch, -// however it does conveniently prevent the prefetch from being reordered -// with respect to nearby accesses to the same memory. -def int_prefetch : Intrinsic<[], - [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty, - llvm_i32_ty], - [IntrArgMemOnly, NoCapture<0>]>; +// IntrInaccessibleMemOrArgMemOnly is a little more pessimistic than strictly +// necessary for prefetch, however it does conveniently prevent the prefetch +// from being reordered overly much with respect to nearby access to the same +// memory while not impeding optimization. +def int_prefetch + : Intrinsic<[], [ llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty ], + [ IntrInaccessibleMemOrArgMemOnly, ReadOnly<0>, NoCapture<0>, + ImmArg<1>, ImmArg<2>]>; def int_pcmarker : Intrinsic<[], [llvm_i32_ty]>; def int_readcyclecounter : Intrinsic<[llvm_i64_ty]>; @@ -376,20 +492,24 @@ def int_instrprof_value_profile : Intrinsic<[], def int_memcpy : Intrinsic<[], [llvm_anyptr_ty, llvm_anyptr_ty, llvm_anyint_ty, - llvm_i32_ty, llvm_i1_ty], + llvm_i1_ty], [IntrArgMemOnly, NoCapture<0>, NoCapture<1>, - WriteOnly<0>, ReadOnly<1>]>; + WriteOnly<0>, ReadOnly<1>, ImmArg<3>]>; def int_memmove : Intrinsic<[], [llvm_anyptr_ty, llvm_anyptr_ty, llvm_anyint_ty, - llvm_i32_ty, llvm_i1_ty], + llvm_i1_ty], [IntrArgMemOnly, NoCapture<0>, NoCapture<1>, - ReadOnly<1>]>; + ReadOnly<1>, ImmArg<3>]>; def int_memset : Intrinsic<[], [llvm_anyptr_ty, llvm_i8_ty, llvm_anyint_ty, - llvm_i32_ty, llvm_i1_ty], - [IntrArgMemOnly, NoCapture<0>, WriteOnly<0>]>; + llvm_i1_ty], + [IntrArgMemOnly, NoCapture<0>, WriteOnly<0>, + ImmArg<3>]>; -let IntrProperties = [IntrNoMem] in { +// FIXME: Add version of these floating point intrinsics which allow non-default +// rounding modes and FP exception handling. 
+ +let IntrProperties = [IntrNoMem, IntrSpeculatable] in { def int_fma : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>]>; @@ -422,13 +542,28 @@ let IntrProperties = [IntrNoMem] in { def int_round : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>; def int_canonicalize : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>; + + def int_lround : Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty]>; + def int_llround : Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty]>; + def int_lrint : Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty]>; + def int_llrint : Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty]>; } def int_minnum : Intrinsic<[llvm_anyfloat_ty], - [LLVMMatchType<0>, LLVMMatchType<0>], [IntrNoMem, Commutative] + [LLVMMatchType<0>, LLVMMatchType<0>], + [IntrNoMem, IntrSpeculatable, Commutative] >; def int_maxnum : Intrinsic<[llvm_anyfloat_ty], - [LLVMMatchType<0>, LLVMMatchType<0>], [IntrNoMem, Commutative] + [LLVMMatchType<0>, LLVMMatchType<0>], + [IntrNoMem, IntrSpeculatable, Commutative] +>; +def int_minimum : Intrinsic<[llvm_anyfloat_ty], + [LLVMMatchType<0>, LLVMMatchType<0>], + [IntrNoMem, IntrSpeculatable, Commutative] +>; +def int_maximum : Intrinsic<[llvm_anyfloat_ty], + [LLVMMatchType<0>, LLVMMatchType<0>], + [IntrNoMem, IntrSpeculatable, Commutative] >; // NOTE: these are internal interfaces. @@ -438,42 +573,187 @@ def int_sigsetjmp : Intrinsic<[llvm_i32_ty] , [llvm_ptr_ty, llvm_i32_ty]>; def int_siglongjmp : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty], [IntrNoReturn]>; // Internal interface for object size checking -def int_objectsize : Intrinsic<[llvm_anyint_ty], [llvm_anyptr_ty, llvm_i1_ty], - [IntrNoMem]>, +def int_objectsize : Intrinsic<[llvm_anyint_ty], + [llvm_anyptr_ty, llvm_i1_ty, + llvm_i1_ty, llvm_i1_ty], + [IntrNoMem, IntrSpeculatable, ImmArg<1>, ImmArg<2>, ImmArg<3>]>, GCCBuiltin<"__builtin_object_size">; +//===--------------- Constrained Floating Point Intrinsics ----------------===// +// + +let IntrProperties = [IntrInaccessibleMemOnly] in { + def int_experimental_constrained_fadd : Intrinsic<[ llvm_anyfloat_ty ], + [ LLVMMatchType<0>, + LLVMMatchType<0>, + llvm_metadata_ty, + llvm_metadata_ty ]>; + def int_experimental_constrained_fsub : Intrinsic<[ llvm_anyfloat_ty ], + [ LLVMMatchType<0>, + LLVMMatchType<0>, + llvm_metadata_ty, + llvm_metadata_ty ]>; + def int_experimental_constrained_fmul : Intrinsic<[ llvm_anyfloat_ty ], + [ LLVMMatchType<0>, + LLVMMatchType<0>, + llvm_metadata_ty, + llvm_metadata_ty ]>; + def int_experimental_constrained_fdiv : Intrinsic<[ llvm_anyfloat_ty ], + [ LLVMMatchType<0>, + LLVMMatchType<0>, + llvm_metadata_ty, + llvm_metadata_ty ]>; + def int_experimental_constrained_frem : Intrinsic<[ llvm_anyfloat_ty ], + [ LLVMMatchType<0>, + LLVMMatchType<0>, + llvm_metadata_ty, + llvm_metadata_ty ]>; + + def int_experimental_constrained_fma : Intrinsic<[ llvm_anyfloat_ty ], + [ LLVMMatchType<0>, + LLVMMatchType<0>, + LLVMMatchType<0>, + llvm_metadata_ty, + llvm_metadata_ty ]>; + + def int_experimental_constrained_fptrunc : Intrinsic<[ llvm_anyfloat_ty ], + [ llvm_anyfloat_ty, + llvm_metadata_ty, + llvm_metadata_ty ]>; + + def int_experimental_constrained_fpext : Intrinsic<[ llvm_anyfloat_ty ], + [ llvm_anyfloat_ty, + llvm_metadata_ty ]>; + + // These intrinsics are sensitive to the rounding mode so we need constrained + // versions of each of them. When strict rounding and exception control are + // not required the non-constrained versions of these intrinsics should be + // used. 
+ def int_experimental_constrained_sqrt : Intrinsic<[ llvm_anyfloat_ty ], + [ LLVMMatchType<0>, + llvm_metadata_ty, + llvm_metadata_ty ]>; + def int_experimental_constrained_powi : Intrinsic<[ llvm_anyfloat_ty ], + [ LLVMMatchType<0>, + llvm_i32_ty, + llvm_metadata_ty, + llvm_metadata_ty ]>; + def int_experimental_constrained_sin : Intrinsic<[ llvm_anyfloat_ty ], + [ LLVMMatchType<0>, + llvm_metadata_ty, + llvm_metadata_ty ]>; + def int_experimental_constrained_cos : Intrinsic<[ llvm_anyfloat_ty ], + [ LLVMMatchType<0>, + llvm_metadata_ty, + llvm_metadata_ty ]>; + def int_experimental_constrained_pow : Intrinsic<[ llvm_anyfloat_ty ], + [ LLVMMatchType<0>, + LLVMMatchType<0>, + llvm_metadata_ty, + llvm_metadata_ty ]>; + def int_experimental_constrained_log : Intrinsic<[ llvm_anyfloat_ty ], + [ LLVMMatchType<0>, + llvm_metadata_ty, + llvm_metadata_ty ]>; + def int_experimental_constrained_log10: Intrinsic<[ llvm_anyfloat_ty ], + [ LLVMMatchType<0>, + llvm_metadata_ty, + llvm_metadata_ty ]>; + def int_experimental_constrained_log2 : Intrinsic<[ llvm_anyfloat_ty ], + [ LLVMMatchType<0>, + llvm_metadata_ty, + llvm_metadata_ty ]>; + def int_experimental_constrained_exp : Intrinsic<[ llvm_anyfloat_ty ], + [ LLVMMatchType<0>, + llvm_metadata_ty, + llvm_metadata_ty ]>; + def int_experimental_constrained_exp2 : Intrinsic<[ llvm_anyfloat_ty ], + [ LLVMMatchType<0>, + llvm_metadata_ty, + llvm_metadata_ty ]>; + def int_experimental_constrained_rint : Intrinsic<[ llvm_anyfloat_ty ], + [ LLVMMatchType<0>, + llvm_metadata_ty, + llvm_metadata_ty ]>; + def int_experimental_constrained_nearbyint : Intrinsic<[ llvm_anyfloat_ty ], + [ LLVMMatchType<0>, + llvm_metadata_ty, + llvm_metadata_ty ]>; + def int_experimental_constrained_maxnum : Intrinsic<[ llvm_anyfloat_ty ], + [ LLVMMatchType<0>, + LLVMMatchType<0>, + llvm_metadata_ty, + llvm_metadata_ty ]>; + def int_experimental_constrained_minnum : Intrinsic<[ llvm_anyfloat_ty ], + [ LLVMMatchType<0>, + LLVMMatchType<0>, + llvm_metadata_ty, + llvm_metadata_ty ]>; + def int_experimental_constrained_ceil : Intrinsic<[ llvm_anyfloat_ty ], + [ LLVMMatchType<0>, + llvm_metadata_ty, + llvm_metadata_ty ]>; + def int_experimental_constrained_floor : Intrinsic<[ llvm_anyfloat_ty ], + [ LLVMMatchType<0>, + llvm_metadata_ty, + llvm_metadata_ty ]>; + def int_experimental_constrained_round : Intrinsic<[ llvm_anyfloat_ty ], + [ LLVMMatchType<0>, + llvm_metadata_ty, + llvm_metadata_ty ]>; + def int_experimental_constrained_trunc : Intrinsic<[ llvm_anyfloat_ty ], + [ LLVMMatchType<0>, + llvm_metadata_ty, + llvm_metadata_ty ]>; +} +// FIXME: Add intrinsics for fcmp, fptoui and fptosi. + //===------------------------- Expect Intrinsics --------------------------===// // -def int_expect : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, - LLVMMatchType<0>], [IntrNoMem]>; +def int_expect : Intrinsic<[llvm_anyint_ty], + [LLVMMatchType<0>, LLVMMatchType<0>], [IntrNoMem]>; //===-------------------- Bit Manipulation Intrinsics ---------------------===// // // None of these intrinsics accesses memory at all. 
-let IntrProperties = [IntrNoMem] in { +let IntrProperties = [IntrNoMem, IntrSpeculatable] in { def int_bswap: Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>]>; def int_ctpop: Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>]>; + def int_bitreverse : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>]>; + def int_fshl : Intrinsic<[llvm_anyint_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>]>; + def int_fshr : Intrinsic<[llvm_anyint_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>]>; +} + +let IntrProperties = [IntrNoMem, IntrSpeculatable, ImmArg<1>] in { def int_ctlz : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, llvm_i1_ty]>; def int_cttz : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, llvm_i1_ty]>; - def int_bitreverse : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>]>; } //===------------------------ Debugger Intrinsics -------------------------===// // -// None of these intrinsics accesses memory at all...but that doesn't mean the -// optimizers can change them aggressively. Special handling needed in a few -// places. -let IntrProperties = [IntrNoMem] in { +// None of these intrinsics accesses memory at all...but that doesn't +// mean the optimizers can change them aggressively. Special handling +// needed in a few places. These synthetic intrinsics have no +// side-effects and just mark information about their operands. +let IntrProperties = [IntrNoMem, IntrSpeculatable] in { def int_dbg_declare : Intrinsic<[], [llvm_metadata_ty, - llvm_metadata_ty, - llvm_metadata_ty]>; + llvm_metadata_ty, + llvm_metadata_ty]>; def int_dbg_value : Intrinsic<[], - [llvm_metadata_ty, llvm_i64_ty, + [llvm_metadata_ty, llvm_metadata_ty, llvm_metadata_ty]>; + def int_dbg_addr : Intrinsic<[], + [llvm_metadata_ty, + llvm_metadata_ty, + llvm_metadata_ty]>; + def int_dbg_label : Intrinsic<[], + [llvm_metadata_ty]>; } //===------------------ Exception Handling Intrinsics----------------------===// @@ -526,6 +806,13 @@ def int_annotation : Intrinsic<[llvm_anyint_ty], llvm_ptr_ty, llvm_i32_ty], [], "llvm.annotation">; +// Annotates the current program point with metadata strings which are emitted +// as CodeView debug info records. This is expensive, as it disables inlining +// and is modelled as having side effects. +def int_codeview_annotation : Intrinsic<[], [llvm_metadata_ty], + [IntrInaccessibleMemOnly, IntrNoDuplicate], + "llvm.codeview.annotation">; + //===------------------------ Trampoline Intrinsics -----------------------===// // def int_init_trampoline : Intrinsic<[], @@ -541,46 +828,100 @@ def int_adjust_trampoline : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], // // Expose the carry flag from add operations on two integrals. 
-def int_sadd_with_overflow : Intrinsic<[llvm_anyint_ty, llvm_i1_ty], +def int_sadd_with_overflow : Intrinsic<[llvm_anyint_ty, + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>], [LLVMMatchType<0>, LLVMMatchType<0>], - [IntrNoMem]>; -def int_uadd_with_overflow : Intrinsic<[llvm_anyint_ty, llvm_i1_ty], + [IntrNoMem, IntrSpeculatable]>; +def int_uadd_with_overflow : Intrinsic<[llvm_anyint_ty, + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>], [LLVMMatchType<0>, LLVMMatchType<0>], - [IntrNoMem]>; + [IntrNoMem, IntrSpeculatable]>; -def int_ssub_with_overflow : Intrinsic<[llvm_anyint_ty, llvm_i1_ty], +def int_ssub_with_overflow : Intrinsic<[llvm_anyint_ty, + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>], [LLVMMatchType<0>, LLVMMatchType<0>], - [IntrNoMem]>; -def int_usub_with_overflow : Intrinsic<[llvm_anyint_ty, llvm_i1_ty], + [IntrNoMem, IntrSpeculatable]>; +def int_usub_with_overflow : Intrinsic<[llvm_anyint_ty, + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>], [LLVMMatchType<0>, LLVMMatchType<0>], - [IntrNoMem]>; + [IntrNoMem, IntrSpeculatable]>; -def int_smul_with_overflow : Intrinsic<[llvm_anyint_ty, llvm_i1_ty], +def int_smul_with_overflow : Intrinsic<[llvm_anyint_ty, + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>], [LLVMMatchType<0>, LLVMMatchType<0>], - [IntrNoMem]>; -def int_umul_with_overflow : Intrinsic<[llvm_anyint_ty, llvm_i1_ty], + [IntrNoMem, IntrSpeculatable]>; +def int_umul_with_overflow : Intrinsic<[llvm_anyint_ty, + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>], [LLVMMatchType<0>, LLVMMatchType<0>], - [IntrNoMem]>; + [IntrNoMem, IntrSpeculatable]>; + +//===------------------------- Saturation Arithmetic Intrinsics ---------------------===// +// +def int_sadd_sat : Intrinsic<[llvm_anyint_ty], + [LLVMMatchType<0>, LLVMMatchType<0>], + [IntrNoMem, IntrSpeculatable, Commutative]>; +def int_uadd_sat : Intrinsic<[llvm_anyint_ty], + [LLVMMatchType<0>, LLVMMatchType<0>], + [IntrNoMem, IntrSpeculatable, Commutative]>; +def int_ssub_sat : Intrinsic<[llvm_anyint_ty], + [LLVMMatchType<0>, LLVMMatchType<0>], + [IntrNoMem, IntrSpeculatable]>; +def int_usub_sat : Intrinsic<[llvm_anyint_ty], + [LLVMMatchType<0>, LLVMMatchType<0>], + [IntrNoMem, IntrSpeculatable]>; + +//===------------------------- Fixed Point Arithmetic Intrinsics ---------------------===// +// +def int_smul_fix : Intrinsic<[llvm_anyint_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty], + [IntrNoMem, IntrSpeculatable, Commutative, ImmArg<2>]>; + +def int_umul_fix : Intrinsic<[llvm_anyint_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty], + [IntrNoMem, IntrSpeculatable, Commutative, ImmArg<2>]>; + +//===------------------- Fixed Point Saturation Arithmetic Intrinsics ----------------===// +// +def int_smul_fix_sat : Intrinsic<[llvm_anyint_ty], + [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty], + [IntrNoMem, IntrSpeculatable, Commutative, ImmArg<2>]>; //===------------------------- Memory Use Markers -------------------------===// // def int_lifetime_start : Intrinsic<[], - [llvm_i64_ty, llvm_ptr_ty], - [IntrArgMemOnly, NoCapture<1>]>; + [llvm_i64_ty, llvm_anyptr_ty], + [IntrArgMemOnly, NoCapture<1>, ImmArg<0>]>; def int_lifetime_end : Intrinsic<[], - [llvm_i64_ty, llvm_ptr_ty], - [IntrArgMemOnly, NoCapture<1>]>; + [llvm_i64_ty, llvm_anyptr_ty], + [IntrArgMemOnly, NoCapture<1>, ImmArg<0>]>; def int_invariant_start : Intrinsic<[llvm_descriptor_ty], [llvm_i64_ty, llvm_anyptr_ty], - [IntrArgMemOnly, NoCapture<1>]>; + [IntrArgMemOnly, NoCapture<1>, ImmArg<0>]>; def int_invariant_end : Intrinsic<[], [llvm_descriptor_ty, llvm_i64_ty, 
llvm_anyptr_ty], - [IntrArgMemOnly, NoCapture<2>]>; - -def int_invariant_group_barrier : Intrinsic<[llvm_ptr_ty], - [llvm_ptr_ty], - [IntrNoMem]>; + [IntrArgMemOnly, NoCapture<2>, ImmArg<1>]>; + +// launder.invariant.group can't be marked with 'readnone' (IntrNoMem), +// because it would cause CSE of two barriers with the same argument. +// Inaccessiblememonly says that the barrier doesn't read the argument, +// but it changes state not accessible to this module. This way +// we can DSE through the barrier because it doesn't read the value +// after store. Although the barrier doesn't modify any memory it +// can't be marked as readonly, because it would be possible to +// CSE 2 barriers with store in between. +// The argument also can't be marked with 'returned' attribute, because +// it would remove barrier. +// Note that it is still experimental, which means that its semantics +// might change in the future. +def int_launder_invariant_group : Intrinsic<[llvm_anyptr_ty], + [LLVMMatchType<0>], + [IntrInaccessibleMemOnly, IntrSpeculatable]>; + + +def int_strip_invariant_group : Intrinsic<[llvm_anyptr_ty], + [LLVMMatchType<0>], + [IntrSpeculatable, IntrNoMem]>; //===------------------------ Stackmap Intrinsics -------------------------===// // @@ -606,33 +947,34 @@ def int_experimental_gc_statepoint : Intrinsic<[llvm_token_ty], [llvm_i64_ty, llvm_i32_ty, llvm_anyptr_ty, llvm_i32_ty, llvm_i32_ty, llvm_vararg_ty], - [Throws]>; + [Throws, ImmArg<0>, ImmArg<1>, ImmArg<3>, ImmArg<4>]>; def int_experimental_gc_result : Intrinsic<[llvm_any_ty], [llvm_token_ty], [IntrReadMem]>; def int_experimental_gc_relocate : Intrinsic<[llvm_any_ty], [llvm_token_ty, llvm_i32_ty, llvm_i32_ty], - [IntrReadMem]>; + [IntrReadMem, ImmArg<1>, ImmArg<2>]>; //===------------------------ Coroutine Intrinsics ---------------===// // These are documented in docs/Coroutines.rst // Coroutine Structure Intrinsics. 
-def int_coro_id : Intrinsic<[llvm_token_ty], [llvm_i32_ty, llvm_ptr_ty, - llvm_ptr_ty, llvm_ptr_ty], - [IntrArgMemOnly, IntrReadMem, +def int_coro_id : Intrinsic<[llvm_token_ty], [llvm_i32_ty, llvm_ptr_ty, + llvm_ptr_ty, llvm_ptr_ty], + [IntrArgMemOnly, IntrReadMem, ReadNone<1>, ReadOnly<2>, NoCapture<2>]>; def int_coro_alloc : Intrinsic<[llvm_i1_ty], [llvm_token_ty], []>; def int_coro_begin : Intrinsic<[llvm_ptr_ty], [llvm_token_ty, llvm_ptr_ty], [WriteOnly<1>]>; -def int_coro_free : Intrinsic<[llvm_ptr_ty], [llvm_token_ty, llvm_ptr_ty], - [IntrReadMem, IntrArgMemOnly, ReadOnly<1>, +def int_coro_free : Intrinsic<[llvm_ptr_ty], [llvm_token_ty, llvm_ptr_ty], + [IntrReadMem, IntrArgMemOnly, ReadOnly<1>, NoCapture<1>]>; -def int_coro_end : Intrinsic<[], [llvm_ptr_ty, llvm_i1_ty], []>; +def int_coro_end : Intrinsic<[llvm_i1_ty], [llvm_ptr_ty, llvm_i1_ty], []>; def int_coro_frame : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>; +def int_coro_noop : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>; def int_coro_size : Intrinsic<[llvm_anyint_ty], [], [IntrNoMem]>; def int_coro_save : Intrinsic<[llvm_token_ty], [llvm_ptr_ty], []>; @@ -661,7 +1003,7 @@ def int_coro_subfn_addr : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i8_ty], // def int_flt_rounds : Intrinsic<[llvm_i32_ty]>, GCCBuiltin<"__builtin_flt_rounds">; -def int_trap : Intrinsic<[], [], [IntrNoReturn]>, +def int_trap : Intrinsic<[], [], [IntrNoReturn, IntrCold]>, GCCBuiltin<"__builtin_trap">; def int_debugtrap : Intrinsic<[]>, GCCBuiltin<"__builtin_debugtrap">; @@ -674,9 +1016,19 @@ def int_experimental_deoptimize : Intrinsic<[llvm_any_ty], [llvm_vararg_ty], def int_experimental_guard : Intrinsic<[], [llvm_i1_ty, llvm_vararg_ty], [Throws]>; +// Supports widenable conditions for guards represented as explicit branches. +def int_experimental_widenable_condition : Intrinsic<[llvm_i1_ty], [], + [IntrInaccessibleMemOnly]>; + // NOP: calls/invokes to this intrinsic are removed by codegen def int_donothing : Intrinsic<[], [], [IntrNoMem]>; +// This instruction has no actual effect, though it is treated by the optimizer +// has having opaque side effects. This may be inserted into loops to ensure +// that they are not removed even if they turn out to be empty, for languages +// which specify that infinite loops must be preserved. +def int_sideeffect : Intrinsic<[], [], [IntrInaccessibleMemOnly]>; + // Intrisics to support half precision floating point format let IntrProperties = [IntrNoMem] in { def int_convert_to_fp16 : Intrinsic<[llvm_i16_ty], [llvm_anyfloat_ty]>; @@ -688,40 +1040,44 @@ def int_convert_from_fp16 : Intrinsic<[llvm_anyfloat_ty], [llvm_i16_ty]>; def int_clear_cache : Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty], [], "llvm.clear_cache">; +// Intrinsic to detect whether its argument is a constant. 
+def int_is_constant : Intrinsic<[llvm_i1_ty], [llvm_any_ty], [IntrNoMem], "llvm.is.constant">; + //===-------------------------- Masked Intrinsics -------------------------===// // def int_masked_store : Intrinsic<[], [llvm_anyvector_ty, LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty, - LLVMVectorSameWidth<0, llvm_i1_ty>], - [IntrArgMemOnly]>; + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>], + [IntrArgMemOnly, ImmArg<2>]>; def int_masked_load : Intrinsic<[llvm_anyvector_ty], [LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty, - LLVMVectorSameWidth<0, llvm_i1_ty>, LLVMMatchType<0>], - [IntrReadMem, IntrArgMemOnly]>; + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<0>], + [IntrReadMem, IntrArgMemOnly, ImmArg<1>]>; def int_masked_gather: Intrinsic<[llvm_anyvector_ty], - [LLVMVectorOfPointersToElt<0>, llvm_i32_ty, - LLVMVectorSameWidth<0, llvm_i1_ty>, + [LLVMVectorOfAnyPointersToElt<0>, llvm_i32_ty, + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<0>], - [IntrReadMem]>; + [IntrReadMem, ImmArg<1>]>; def int_masked_scatter: Intrinsic<[], [llvm_anyvector_ty, - LLVMVectorOfPointersToElt<0>, llvm_i32_ty, - LLVMVectorSameWidth<0, llvm_i1_ty>]>; + LLVMVectorOfAnyPointersToElt<0>, llvm_i32_ty, + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>], + [ImmArg<2>]>; def int_masked_expandload: Intrinsic<[llvm_anyvector_ty], [LLVMPointerToElt<0>, - LLVMVectorSameWidth<0, llvm_i1_ty>, + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<0>], [IntrReadMem]>; def int_masked_compressstore: Intrinsic<[], [llvm_anyvector_ty, LLVMPointerToElt<0>, - LLVMVectorSameWidth<0, llvm_i1_ty>], + LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>], [IntrArgMemOnly]>; // Test whether a pointer is associated with a type metadata identifier. @@ -733,17 +1089,148 @@ def int_type_checked_load : Intrinsic<[llvm_ptr_ty, llvm_i1_ty], [llvm_ptr_ty, llvm_i32_ty, llvm_metadata_ty], [IntrNoMem]>; +// Create a branch funnel that implements an indirect call to a limited set of +// callees. This needs to be a musttail call. +def int_icall_branch_funnel : Intrinsic<[], [llvm_vararg_ty], []>; + def int_load_relative: Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_anyint_ty], [IntrReadMem, IntrArgMemOnly]>; +def int_hwasan_check_memaccess : + Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty, llvm_i32_ty], [IntrInaccessibleMemOnly, ImmArg<2>]>; + +// Xray intrinsics +//===----------------------------------------------------------------------===// +// Custom event logging for x-ray. +// Takes a pointer to a string and the length of the string. +def int_xray_customevent : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty], + [NoCapture<0>, ReadOnly<0>, IntrWriteMem]>; +// Typed event logging for x-ray. +// Takes a numeric type tag, a pointer to a string and the length of the string. 
+def int_xray_typedevent : Intrinsic<[], [llvm_i16_ty, llvm_ptr_ty, llvm_i32_ty], + [NoCapture<1>, ReadOnly<1>, IntrWriteMem]>; +//===----------------------------------------------------------------------===// + //===------ Memory intrinsics with element-wise atomicity guarantees ------===// // -def int_memcpy_element_atomic : Intrinsic<[], - [llvm_anyptr_ty, llvm_anyptr_ty, - llvm_i64_ty, llvm_i32_ty], - [IntrArgMemOnly, NoCapture<0>, NoCapture<1>, - WriteOnly<0>, ReadOnly<1>]>; +// @llvm.memcpy.element.unordered.atomic.*(dest, src, length, elementsize) +def int_memcpy_element_unordered_atomic + : Intrinsic<[], + [ + llvm_anyptr_ty, llvm_anyptr_ty, llvm_anyint_ty, llvm_i32_ty + ], + [ + IntrArgMemOnly, NoCapture<0>, NoCapture<1>, WriteOnly<0>, + ReadOnly<1>, ImmArg<3> + ]>; + +// @llvm.memmove.element.unordered.atomic.*(dest, src, length, elementsize) +def int_memmove_element_unordered_atomic + : Intrinsic<[], + [ + llvm_anyptr_ty, llvm_anyptr_ty, llvm_anyint_ty, llvm_i32_ty + ], + [ + IntrArgMemOnly, NoCapture<0>, NoCapture<1>, WriteOnly<0>, + ReadOnly<1>, ImmArg<3> + ]>; + +// @llvm.memset.element.unordered.atomic.*(dest, value, length, elementsize) +def int_memset_element_unordered_atomic + : Intrinsic<[], [ llvm_anyptr_ty, llvm_i8_ty, llvm_anyint_ty, llvm_i32_ty ], + [ IntrArgMemOnly, NoCapture<0>, WriteOnly<0>, ImmArg<3> ]>; + +//===------------------------ Reduction Intrinsics ------------------------===// +// +def int_experimental_vector_reduce_v2_fadd : Intrinsic<[llvm_anyfloat_ty], + [LLVMMatchType<0>, + llvm_anyvector_ty], + [IntrNoMem]>; +def int_experimental_vector_reduce_v2_fmul : Intrinsic<[llvm_anyfloat_ty], + [LLVMMatchType<0>, + llvm_anyvector_ty], + [IntrNoMem]>; +def int_experimental_vector_reduce_add : Intrinsic<[LLVMVectorElementType<0>], + [llvm_anyvector_ty], + [IntrNoMem]>; +def int_experimental_vector_reduce_mul : Intrinsic<[LLVMVectorElementType<0>], + [llvm_anyvector_ty], + [IntrNoMem]>; +def int_experimental_vector_reduce_and : Intrinsic<[LLVMVectorElementType<0>], + [llvm_anyvector_ty], + [IntrNoMem]>; +def int_experimental_vector_reduce_or : Intrinsic<[LLVMVectorElementType<0>], + [llvm_anyvector_ty], + [IntrNoMem]>; +def int_experimental_vector_reduce_xor : Intrinsic<[LLVMVectorElementType<0>], + [llvm_anyvector_ty], + [IntrNoMem]>; +def int_experimental_vector_reduce_smax : Intrinsic<[LLVMVectorElementType<0>], + [llvm_anyvector_ty], + [IntrNoMem]>; +def int_experimental_vector_reduce_smin : Intrinsic<[LLVMVectorElementType<0>], + [llvm_anyvector_ty], + [IntrNoMem]>; +def int_experimental_vector_reduce_umax : Intrinsic<[LLVMVectorElementType<0>], + [llvm_anyvector_ty], + [IntrNoMem]>; +def int_experimental_vector_reduce_umin : Intrinsic<[LLVMVectorElementType<0>], + [llvm_anyvector_ty], + [IntrNoMem]>; +def int_experimental_vector_reduce_fmax : Intrinsic<[LLVMVectorElementType<0>], + [llvm_anyvector_ty], + [IntrNoMem]>; +def int_experimental_vector_reduce_fmin : Intrinsic<[LLVMVectorElementType<0>], + [llvm_anyvector_ty], + [IntrNoMem]>; + +//===---------- Intrinsics to control hardware supported loops ----------===// + +// Specify that the value given is the number of iterations that the next loop +// will execute. +def int_set_loop_iterations : + Intrinsic<[], [llvm_anyint_ty], [IntrNoDuplicate]>; + +// Specify that the value given is the number of iterations that the next loop +// will execute. Also test that the given count is not zero, allowing it to +// control entry to a 'while' loop. 
+def int_test_set_loop_iterations : + Intrinsic<[llvm_i1_ty], [llvm_anyint_ty], [IntrNoDuplicate]>; + +// Decrement loop counter by the given argument. Return false if the loop +// should exit. +def int_loop_decrement : + Intrinsic<[llvm_i1_ty], [llvm_anyint_ty], [IntrNoDuplicate]>; + +// Decrement the first operand (the loop counter) by the second operand (the +// maximum number of elements processed in an iteration). Return the remaining +// number of iterations still to be executed. This is effectively a sub which +// can be used with a phi, icmp and br to control the number of iterations +// executed, as usual. +def int_loop_decrement_reg : + Intrinsic<[llvm_anyint_ty], + [llvm_anyint_ty, llvm_anyint_ty], [IntrNoDuplicate]>; + +//===----- Intrinsics that are used to provide predicate information -----===// + +def int_ssa_copy : Intrinsic<[llvm_any_ty], [LLVMMatchType<0>], + [IntrNoMem, Returned<0>]>; + +//===------- Intrinsics that are used to preserve debug information -------===// + +def int_preserve_array_access_index : Intrinsic<[llvm_anyptr_ty], + [llvm_anyptr_ty, llvm_i32_ty, + llvm_i32_ty], + [IntrNoMem, ImmArg<1>, ImmArg<2>]>; +def int_preserve_union_access_index : Intrinsic<[llvm_anyptr_ty], + [llvm_anyptr_ty, llvm_i32_ty], + [IntrNoMem, ImmArg<1>]>; +def int_preserve_struct_access_index : Intrinsic<[llvm_anyptr_ty], + [llvm_anyptr_ty, llvm_i32_ty, + llvm_i32_ty], + [IntrNoMem, ImmArg<1>, + ImmArg<2>]>; //===----------------------------------------------------------------------===// // Target-specific intrinsics @@ -761,4 +1248,5 @@ include "llvm/IR/IntrinsicsAMDGPU.td" include "llvm/IR/IntrinsicsBPF.td" include "llvm/IR/IntrinsicsSystemZ.td" include "llvm/IR/IntrinsicsWebAssembly.td" +include "llvm/IR/IntrinsicsRISCV.td" include "llvm/IR/IntrinsicsVISC.td" -- GitLab
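
For reference only (not part of the patch): a minimal LLVM IR sketch showing how two of the intrinsics declared by this table are called after the signature changes above. The four-operand llvm.memcpy no longer takes a separate i32 alignment operand (alignment, if known, is carried by parameter attributes), and its isvolatile flag must be a constant per ImmArg<3>; llvm.sadd.with.overflow returns the sum together with an i1 overflow flag. The function name @copy_and_add and its arguments are illustrative, not taken from HPVM.

declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i1)
declare { i32, i1 } @llvm.sadd.with.overflow.i32(i32, i32)
declare void @llvm.trap()

define i32 @copy_and_add(i8* %dst, i8* %src, i64 %n, i32 %a, i32 %b) {
entry:
  ; dest, src, length, isvolatile -- the i1 must be an immediate (ImmArg<3>)
  call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %n, i1 false)
  ; overflow-checked add: the aggregate result carries the sum and an i1 flag
  %pair = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
  %sum = extractvalue { i32, i1 } %pair, 0
  %ovf = extractvalue { i32, i1 } %pair, 1
  br i1 %ovf, label %overflow, label %ok

overflow:                                        ; llvm.trap is IntrNoReturn and IntrCold
  call void @llvm.trap()
  unreachable

ok:
  ret i32 %sum
}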