From 4f6a876f2923cb07c86c8565f0666ec1a750ea51 Mon Sep 17 00:00:00 2001
From: Hashim Sharif <hsharif3@tyler.cs.illinois.edu>
Date: Mon, 30 Dec 2019 01:12:48 -0600
Subject: [PATCH] Adding templates for LLVM patches - Incomplete

---
 llvm/tools/hpvm/llvm_patches/.#patch_src.sh   |    1 +
 llvm/tools/hpvm/llvm_patches/apply_patch.sh   |    7 +
 .../hpvm/llvm_patches/construct_patch.sh      |   31 +
 .../include/Bitcode/LLVMBitCodes.h            |  544 ++
 .../include/Bitcode/LLVMBitCodes.h.patch      |   16 +
 .../llvm_patches/include/IR/Attributes.td     |  223 +
 .../include/IR/Attributes.td.patch            |   19 +
 .../llvm_patches/include/IR/Intrinsics.td     |  764 ++
 .../include/IR/Intrinsics.td.patch            |    7 +
 .../llvm_patches/include/IR/IntrinsicsVISC.td |  328 +
 .../include/IR/IntrinsicsVISC.td.patch        |    0
 .../hpvm/llvm_patches/include/Support/Debug.h |  104 +
 .../include/Support/Debug.h.patch             |    0
 .../llvm_patches/lib/AsmParser/LLLexer.cpp    | 1020 +++
 .../lib/AsmParser/LLLexer.cpp.patch           |   13 +
 .../hpvm/llvm_patches/lib/AsmParser/LLLexer.h |   96 +
 .../lib/AsmParser/LLLexer.h.patch             |    0
 .../llvm_patches/lib/AsmParser/LLParser.cpp   | 6574 +++++++++++++++++
 .../lib/AsmParser/LLParser.cpp.patch          |   36 +
 .../llvm_patches/lib/AsmParser/LLParser.h     |  510 ++
 .../lib/AsmParser/LLParser.h.patch            |    0
 .../hpvm/llvm_patches/lib/AsmParser/LLToken.h |  371 +
 .../lib/AsmParser/LLToken.h.patch             |   13 +
 .../lib/Bitcode/Reader/BitcodeReader.cpp      | 5408 ++++++++++++++
 .../Bitcode/Reader/BitcodeReader.cpp.patch    |   15 +
 .../lib/Bitcode/Writer/BitcodeWriter.cpp      | 3970 ++++++++++
 .../Bitcode/Writer/BitcodeWriter.cpp.patch    |   18 +
 .../hpvm/llvm_patches/lib/IR/Attributes.cpp   | 1525 ++++
 .../llvm_patches/lib/IR/Attributes.cpp.patch  |   17 +
 29 files changed, 21630 insertions(+)
 create mode 120000 llvm/tools/hpvm/llvm_patches/.#patch_src.sh
 create mode 100644 llvm/tools/hpvm/llvm_patches/apply_patch.sh
 create mode 100644 llvm/tools/hpvm/llvm_patches/construct_patch.sh
 create mode 100644 llvm/tools/hpvm/llvm_patches/include/Bitcode/LLVMBitCodes.h
 create mode 100644 llvm/tools/hpvm/llvm_patches/include/Bitcode/LLVMBitCodes.h.patch
 create mode 100644 llvm/tools/hpvm/llvm_patches/include/IR/Attributes.td
 create mode 100644 llvm/tools/hpvm/llvm_patches/include/IR/Attributes.td.patch
 create mode 100644 llvm/tools/hpvm/llvm_patches/include/IR/Intrinsics.td
 create mode 100644 llvm/tools/hpvm/llvm_patches/include/IR/Intrinsics.td.patch
 create mode 100644 llvm/tools/hpvm/llvm_patches/include/IR/IntrinsicsVISC.td
 create mode 100644 llvm/tools/hpvm/llvm_patches/include/IR/IntrinsicsVISC.td.patch
 create mode 100644 llvm/tools/hpvm/llvm_patches/include/Support/Debug.h
 create mode 100644 llvm/tools/hpvm/llvm_patches/include/Support/Debug.h.patch
 create mode 100644 llvm/tools/hpvm/llvm_patches/lib/AsmParser/LLLexer.cpp
 create mode 100644 llvm/tools/hpvm/llvm_patches/lib/AsmParser/LLLexer.cpp.patch
 create mode 100644 llvm/tools/hpvm/llvm_patches/lib/AsmParser/LLLexer.h
 create mode 100644 llvm/tools/hpvm/llvm_patches/lib/AsmParser/LLLexer.h.patch
 create mode 100644 llvm/tools/hpvm/llvm_patches/lib/AsmParser/LLParser.cpp
 create mode 100644 llvm/tools/hpvm/llvm_patches/lib/AsmParser/LLParser.cpp.patch
 create mode 100644 llvm/tools/hpvm/llvm_patches/lib/AsmParser/LLParser.h
 create mode 100644 llvm/tools/hpvm/llvm_patches/lib/AsmParser/LLParser.h.patch
 create mode 100644 llvm/tools/hpvm/llvm_patches/lib/AsmParser/LLToken.h
 create mode 100644 llvm/tools/hpvm/llvm_patches/lib/AsmParser/LLToken.h.patch
 create mode 100644 llvm/tools/hpvm/llvm_patches/lib/Bitcode/Reader/BitcodeReader.cpp
 create mode 100644 llvm/tools/hpvm/llvm_patches/lib/Bitcode/Reader/BitcodeReader.cpp.patch
 create mode 100644 llvm/tools/hpvm/llvm_patches/lib/Bitcode/Writer/BitcodeWriter.cpp
 create mode 100644 llvm/tools/hpvm/llvm_patches/lib/Bitcode/Writer/BitcodeWriter.cpp.patch
 create mode 100644 llvm/tools/hpvm/llvm_patches/lib/IR/Attributes.cpp
 create mode 100644 llvm/tools/hpvm/llvm_patches/lib/IR/Attributes.cpp.patch

diff --git a/llvm/tools/hpvm/llvm_patches/.#patch_src.sh b/llvm/tools/hpvm/llvm_patches/.#patch_src.sh
new file mode 120000
index 0000000000..1a3c48e892
--- /dev/null
+++ b/llvm/tools/hpvm/llvm_patches/.#patch_src.sh
@@ -0,0 +1 @@
+hsharif3@tyler.cs.illinois.edu.16670:1574121859
\ No newline at end of file
diff --git a/llvm/tools/hpvm/llvm_patches/apply_patch.sh b/llvm/tools/hpvm/llvm_patches/apply_patch.sh
new file mode 100644
index 0000000000..7fe4e80c2d
--- /dev/null
+++ b/llvm/tools/hpvm/llvm_patches/apply_patch.sh
@@ -0,0 +1,7 @@
+#!/bin/sh
+
+### File Copies
+cp include/IR/IntrinsicsVISC.td  ../../../include/llvm/IR/IntrinsicsVISC.td
+
+## File Patches
+patch  /home/hsharif3/Github/llvm-project/llvm/include/llvm/IR/Attributes.td  <  ./include/IR/Attributes.td.patch 
diff --git a/llvm/tools/hpvm/llvm_patches/construct_patch.sh b/llvm/tools/hpvm/llvm_patches/construct_patch.sh
new file mode 100644
index 0000000000..e191308e95
--- /dev/null
+++ b/llvm/tools/hpvm/llvm_patches/construct_patch.sh
@@ -0,0 +1,31 @@
+#!/bin/sh
+
+#### Patching Headers
+diff -u  ../../../include/llvm/Bitcode/LLVMBitCodes.h  include/Bitcode/LLVMBitCodes.h > include/Bitcode/LLVMBitCodes.h.patch 
+
+diff -u  ../../../include/llvm/IR/Attributes.td   include/IR/Attributes.td   > include/IR/Attributes.td.patch
+
+diff -u  ../../../include/llvm/IR/Intrinsics.td   include/IR/Intrinsics.td > include/IR/Intrinsics.td.patch
+
+cp include/IR/IntrinsicsVISC.td  ../../../include/llvm/IR/IntrinsicsVISC.td 
+
+diff -u  ../../../include/llvm/Support/Debug.h   include/Support/Debug.h > include/Support/Debug.h.patch
+
+
+#### Patching Sources
+
+diff -u  ../../../lib/AsmParser/LLLexer.cpp   lib/AsmParser/LLLexer.cpp > lib/AsmParser/LLLexer.cpp.patch 
+
+diff -u  ../../../lib/AsmParser/LLLexer.h   lib/AsmParser/LLLexer.h > lib/AsmParser/LLLexer.h.patch
+
+diff -u  ../../../lib/AsmParser/LLParser.cpp   lib/AsmParser/LLParser.cpp > lib/AsmParser/LLParser.cpp.patch
+
+diff -u  ../../../lib/AsmParser/LLParser.h   lib/AsmParser/LLParser.h > lib/AsmParser/LLParser.h.patch
+
+diff -u  ../../../lib/AsmParser/LLToken.h   lib/AsmParser/LLToken.h > lib/AsmParser/LLToken.h.patch
+
+diff -u  ../../../lib/IR/Attributes.cpp   lib/IR/Attributes.cpp > lib/IR/Attributes.cpp.patch
+
+diff -u  ../../../lib/Bitcode/Reader/BitcodeReader.cpp   lib/Bitcode/Reader/BitcodeReader.cpp > lib/Bitcode/Reader/BitcodeReader.cpp.patch
+
+diff -u  ../../../lib/Bitcode/Writer/BitcodeWriter.cpp   lib/Bitcode/Writer/BitcodeWriter.cpp > lib/Bitcode/Writer/BitcodeWriter.cpp.patch
diff --git a/llvm/tools/hpvm/llvm_patches/include/Bitcode/LLVMBitCodes.h b/llvm/tools/hpvm/llvm_patches/include/Bitcode/LLVMBitCodes.h
new file mode 100644
index 0000000000..547455805a
--- /dev/null
+++ b/llvm/tools/hpvm/llvm_patches/include/Bitcode/LLVMBitCodes.h
@@ -0,0 +1,544 @@
+//===- LLVMBitCodes.h - Enum values for the LLVM bitcode format -*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header defines Bitcode enum values for LLVM IR bitcode files.
+//
+// The enum values defined in this file should be considered permanent.  If
+// new features are added, they should have values added at the end of the
+// respective lists.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_BITCODE_LLVMBITCODES_H
+#define LLVM_BITCODE_LLVMBITCODES_H
+
+#include "llvm/Bitcode/BitCodes.h"
+
+namespace llvm {
+namespace bitc {
+// The only top-level block type defined is for a module.
+enum BlockIDs {
+  // Blocks
+  MODULE_BLOCK_ID = FIRST_APPLICATION_BLOCKID,
+
+  // Module sub-block id's.
+  PARAMATTR_BLOCK_ID,
+  PARAMATTR_GROUP_BLOCK_ID,
+
+  CONSTANTS_BLOCK_ID,
+  FUNCTION_BLOCK_ID,
+
+  // Block intended to contains information on the bitcode versioning.
+  // Can be used to provide better error messages when we fail to parse a
+  // bitcode file.
+  IDENTIFICATION_BLOCK_ID,
+
+  VALUE_SYMTAB_BLOCK_ID,
+  METADATA_BLOCK_ID,
+  METADATA_ATTACHMENT_ID,
+
+  TYPE_BLOCK_ID_NEW,
+
+  USELIST_BLOCK_ID,
+
+  MODULE_STRTAB_BLOCK_ID,
+  GLOBALVAL_SUMMARY_BLOCK_ID,
+
+  OPERAND_BUNDLE_TAGS_BLOCK_ID,
+
+  METADATA_KIND_BLOCK_ID
+};
+
+/// Identification block contains a string that describes the producer details,
+/// and an epoch that defines the auto-upgrade capability.
+enum IdentificationCodes {
+  IDENTIFICATION_CODE_STRING = 1, // IDENTIFICATION:      [strchr x N]
+  IDENTIFICATION_CODE_EPOCH = 2,  // EPOCH:               [epoch#]
+};
+
+/// The epoch that defines the auto-upgrade compatibility for the bitcode.
+///
+/// LLVM guarantees in a major release that a minor release can read bitcode
+/// generated by previous minor releases. We translate this by making the reader
+/// accepting only bitcode with the same epoch, except for the X.0 release which
+/// also accepts N-1.
+enum { BITCODE_CURRENT_EPOCH = 0 };
+
+/// MODULE blocks have a number of optional fields and subblocks.
+enum ModuleCodes {
+  MODULE_CODE_VERSION = 1,     // VERSION:     [version#]
+  MODULE_CODE_TRIPLE = 2,      // TRIPLE:      [strchr x N]
+  MODULE_CODE_DATALAYOUT = 3,  // DATALAYOUT:  [strchr x N]
+  MODULE_CODE_ASM = 4,         // ASM:         [strchr x N]
+  MODULE_CODE_SECTIONNAME = 5, // SECTIONNAME: [strchr x N]
+
+  // FIXME: Remove DEPLIB in 4.0.
+  MODULE_CODE_DEPLIB = 6, // DEPLIB:      [strchr x N]
+
+  // GLOBALVAR: [pointer type, isconst, initid,
+  //             linkage, alignment, section, visibility, threadlocal]
+  MODULE_CODE_GLOBALVAR = 7,
+
+  // FUNCTION:  [type, callingconv, isproto, linkage, paramattrs, alignment,
+  //             section, visibility, gc, unnamed_addr]
+  MODULE_CODE_FUNCTION = 8,
+
+  // ALIAS: [alias type, aliasee val#, linkage, visibility]
+  MODULE_CODE_ALIAS_OLD = 9,
+
+  // MODULE_CODE_PURGEVALS: [numvals]
+  MODULE_CODE_PURGEVALS = 10,
+
+  MODULE_CODE_GCNAME = 11, // GCNAME: [strchr x N]
+  MODULE_CODE_COMDAT = 12, // COMDAT: [selection_kind, name]
+
+  MODULE_CODE_VSTOFFSET = 13, // VSTOFFSET: [offset]
+
+  // ALIAS: [alias value type, addrspace, aliasee val#, linkage, visibility]
+  MODULE_CODE_ALIAS = 14,
+
+  MODULE_CODE_METADATA_VALUES_UNUSED = 15,
+
+  // SOURCE_FILENAME: [namechar x N]
+  MODULE_CODE_SOURCE_FILENAME = 16,
+
+  // HASH: [5*i32]
+  MODULE_CODE_HASH = 17,
+
+  // IFUNC: [ifunc value type, addrspace, resolver val#, linkage, visibility]
+  MODULE_CODE_IFUNC = 18,
+};
+
+/// PARAMATTR blocks have code for defining a parameter attribute set.
+enum AttributeCodes {
+  // FIXME: Remove `PARAMATTR_CODE_ENTRY_OLD' in 4.0
+  PARAMATTR_CODE_ENTRY_OLD = 1, // ENTRY: [paramidx0, attr0,
+                                //         paramidx1, attr1...]
+  PARAMATTR_CODE_ENTRY = 2,     // ENTRY: [attrgrp0, attrgrp1, ...]
+  PARAMATTR_GRP_CODE_ENTRY = 3  // ENTRY: [grpid, idx, attr0, attr1, ...]
+};
+
+/// TYPE blocks have codes for each type primitive they use.
+enum TypeCodes {
+  TYPE_CODE_NUMENTRY = 1, // NUMENTRY: [numentries]
+
+  // Type Codes
+  TYPE_CODE_VOID = 2,    // VOID
+  TYPE_CODE_FLOAT = 3,   // FLOAT
+  TYPE_CODE_DOUBLE = 4,  // DOUBLE
+  TYPE_CODE_LABEL = 5,   // LABEL
+  TYPE_CODE_OPAQUE = 6,  // OPAQUE
+  TYPE_CODE_INTEGER = 7, // INTEGER: [width]
+  TYPE_CODE_POINTER = 8, // POINTER: [pointee type]
+
+  TYPE_CODE_FUNCTION_OLD = 9, // FUNCTION: [vararg, attrid, retty,
+                              //            paramty x N]
+
+  TYPE_CODE_HALF = 10, // HALF
+
+  TYPE_CODE_ARRAY = 11,  // ARRAY: [numelts, eltty]
+  TYPE_CODE_VECTOR = 12, // VECTOR: [numelts, eltty]
+
+  // These are not with the other floating point types because they're
+  // a late addition, and putting them in the right place breaks
+  // binary compatibility.
+  TYPE_CODE_X86_FP80 = 13,  // X86 LONG DOUBLE
+  TYPE_CODE_FP128 = 14,     // LONG DOUBLE (112 bit mantissa)
+  TYPE_CODE_PPC_FP128 = 15, // PPC LONG DOUBLE (2 doubles)
+
+  TYPE_CODE_METADATA = 16, // METADATA
+
+  TYPE_CODE_X86_MMX = 17, // X86 MMX
+
+  TYPE_CODE_STRUCT_ANON = 18,  // STRUCT_ANON: [ispacked, eltty x N]
+  TYPE_CODE_STRUCT_NAME = 19,  // STRUCT_NAME: [strchr x N]
+  TYPE_CODE_STRUCT_NAMED = 20, // STRUCT_NAMED: [ispacked, eltty x N]
+
+  TYPE_CODE_FUNCTION = 21, // FUNCTION: [vararg, retty, paramty x N]
+
+  TYPE_CODE_TOKEN = 22 // TOKEN
+};
+
+enum OperandBundleTagCode {
+  OPERAND_BUNDLE_TAG = 1, // TAG: [strchr x N]
+};
+
+// Value symbol table codes.
+enum ValueSymtabCodes {
+  VST_CODE_ENTRY = 1,   // VST_ENTRY: [valueid, namechar x N]
+  VST_CODE_BBENTRY = 2, // VST_BBENTRY: [bbid, namechar x N]
+  VST_CODE_FNENTRY = 3, // VST_FNENTRY: [valueid, offset, namechar x N]
+  // VST_COMBINED_ENTRY: [valueid, refguid]
+  VST_CODE_COMBINED_ENTRY = 5
+};
+
+// The module path symbol table only has one code (MST_CODE_ENTRY).
+enum ModulePathSymtabCodes {
+  MST_CODE_ENTRY = 1, // MST_ENTRY: [modid, namechar x N]
+  MST_CODE_HASH = 2,  // MST_HASH:  [5*i32]
+};
+
+// The summary section uses different codes in the per-module
+// and combined index cases.
+enum GlobalValueSummarySymtabCodes {
+  // PERMODULE: [valueid, flags, instcount, numrefs, numrefs x valueid,
+  //             n x (valueid)]
+  FS_PERMODULE = 1,
+  // PERMODULE_PROFILE: [valueid, flags, instcount, numrefs,
+  //                     numrefs x valueid,
+  //                     n x (valueid, hotness)]
+  FS_PERMODULE_PROFILE = 2,
+  // PERMODULE_GLOBALVAR_INIT_REFS: [valueid, flags, n x valueid]
+  FS_PERMODULE_GLOBALVAR_INIT_REFS = 3,
+  // COMBINED: [valueid, modid, flags, instcount, numrefs, numrefs x valueid,
+  //            n x (valueid)]
+  FS_COMBINED = 4,
+  // COMBINED_PROFILE: [valueid, modid, flags, instcount, numrefs,
+  //                    numrefs x valueid,
+  //                    n x (valueid, hotness)]
+  FS_COMBINED_PROFILE = 5,
+  // COMBINED_GLOBALVAR_INIT_REFS: [valueid, modid, flags, n x valueid]
+  FS_COMBINED_GLOBALVAR_INIT_REFS = 6,
+  // ALIAS: [valueid, flags, valueid]
+  FS_ALIAS = 7,
+  // COMBINED_ALIAS: [valueid, modid, flags, valueid]
+  FS_COMBINED_ALIAS = 8,
+  // COMBINED_ORIGINAL_NAME: [original_name_hash]
+  FS_COMBINED_ORIGINAL_NAME = 9,
+  // VERSION of the summary, bumped when adding flags for instance.
+  FS_VERSION = 10,
+  // The list of llvm.type.test type identifiers used by the following function.
+  FS_TYPE_TESTS = 11,
+};
+
+enum MetadataCodes {
+  METADATA_STRING_OLD = 1,     // MDSTRING:      [values]
+  METADATA_VALUE = 2,          // VALUE:         [type num, value num]
+  METADATA_NODE = 3,           // NODE:          [n x md num]
+  METADATA_NAME = 4,           // STRING:        [values]
+  METADATA_DISTINCT_NODE = 5,  // DISTINCT_NODE: [n x md num]
+  METADATA_KIND = 6,           // [n x [id, name]]
+  METADATA_LOCATION = 7,       // [distinct, line, col, scope, inlined-at?]
+  METADATA_OLD_NODE = 8,       // OLD_NODE:      [n x (type num, value num)]
+  METADATA_OLD_FN_NODE = 9,    // OLD_FN_NODE:   [n x (type num, value num)]
+  METADATA_NAMED_NODE = 10,    // NAMED_NODE:    [n x mdnodes]
+  METADATA_ATTACHMENT = 11,    // [m x [value, [n x [id, mdnode]]]
+  METADATA_GENERIC_DEBUG = 12, // [distinct, tag, vers, header, n x md num]
+  METADATA_SUBRANGE = 13,      // [distinct, count, lo]
+  METADATA_ENUMERATOR = 14,    // [distinct, value, name]
+  METADATA_BASIC_TYPE = 15,    // [distinct, tag, name, size, align, enc]
+  METADATA_FILE = 16, // [distinct, filename, directory, checksumkind, checksum]
+  METADATA_DERIVED_TYPE = 17,       // [distinct, ...]
+  METADATA_COMPOSITE_TYPE = 18,     // [distinct, ...]
+  METADATA_SUBROUTINE_TYPE = 19,    // [distinct, flags, types, cc]
+  METADATA_COMPILE_UNIT = 20,       // [distinct, ...]
+  METADATA_SUBPROGRAM = 21,         // [distinct, ...]
+  METADATA_LEXICAL_BLOCK = 22,      // [distinct, scope, file, line, column]
+  METADATA_LEXICAL_BLOCK_FILE = 23, //[distinct, scope, file, discriminator]
+  METADATA_NAMESPACE = 24, // [distinct, scope, file, name, line, exportSymbols]
+  METADATA_TEMPLATE_TYPE = 25,   // [distinct, scope, name, type, ...]
+  METADATA_TEMPLATE_VALUE = 26,  // [distinct, scope, name, type, value, ...]
+  METADATA_GLOBAL_VAR = 27,      // [distinct, ...]
+  METADATA_LOCAL_VAR = 28,       // [distinct, ...]
+  METADATA_EXPRESSION = 29,      // [distinct, n x element]
+  METADATA_OBJC_PROPERTY = 30,   // [distinct, name, file, line, ...]
+  METADATA_IMPORTED_ENTITY = 31, // [distinct, tag, scope, entity, line, name]
+  METADATA_MODULE = 32,          // [distinct, scope, name, ...]
+  METADATA_MACRO = 33,           // [distinct, macinfo, line, name, value]
+  METADATA_MACRO_FILE = 34,      // [distinct, macinfo, line, file, ...]
+  METADATA_STRINGS = 35,         // [count, offset] blob([lengths][chars])
+  METADATA_GLOBAL_DECL_ATTACHMENT = 36, // [valueid, n x [id, mdnode]]
+  METADATA_GLOBAL_VAR_EXPR = 37,        // [distinct, var, expr]
+  METADATA_INDEX_OFFSET = 38,           // [offset]
+  METADATA_INDEX = 39,                  // [bitpos]
+};
+
+// The constants block (CONSTANTS_BLOCK_ID) describes emission for each
+// constant and maintains an implicit current type value.
+enum ConstantsCodes {
+  CST_CODE_SETTYPE = 1,          // SETTYPE:       [typeid]
+  CST_CODE_NULL = 2,             // NULL
+  CST_CODE_UNDEF = 3,            // UNDEF
+  CST_CODE_INTEGER = 4,          // INTEGER:       [intval]
+  CST_CODE_WIDE_INTEGER = 5,     // WIDE_INTEGER:  [n x intval]
+  CST_CODE_FLOAT = 6,            // FLOAT:         [fpval]
+  CST_CODE_AGGREGATE = 7,        // AGGREGATE:     [n x value number]
+  CST_CODE_STRING = 8,           // STRING:        [values]
+  CST_CODE_CSTRING = 9,          // CSTRING:       [values]
+  CST_CODE_CE_BINOP = 10,        // CE_BINOP:      [opcode, opval, opval]
+  CST_CODE_CE_CAST = 11,         // CE_CAST:       [opcode, opty, opval]
+  CST_CODE_CE_GEP = 12,          // CE_GEP:        [n x operands]
+  CST_CODE_CE_SELECT = 13,       // CE_SELECT:     [opval, opval, opval]
+  CST_CODE_CE_EXTRACTELT = 14,   // CE_EXTRACTELT: [opty, opval, opval]
+  CST_CODE_CE_INSERTELT = 15,    // CE_INSERTELT:  [opval, opval, opval]
+  CST_CODE_CE_SHUFFLEVEC = 16,   // CE_SHUFFLEVEC: [opval, opval, opval]
+  CST_CODE_CE_CMP = 17,          // CE_CMP:        [opty, opval, opval, pred]
+  CST_CODE_INLINEASM_OLD = 18,   // INLINEASM:     [sideeffect|alignstack,
+                                 //                 asmstr,conststr]
+  CST_CODE_CE_SHUFVEC_EX = 19,   // SHUFVEC_EX:    [opty, opval, opval, opval]
+  CST_CODE_CE_INBOUNDS_GEP = 20, // INBOUNDS_GEP:  [n x operands]
+  CST_CODE_BLOCKADDRESS = 21,    // CST_CODE_BLOCKADDRESS [fnty, fnval, bb#]
+  CST_CODE_DATA = 22,            // DATA:          [n x elements]
+  CST_CODE_INLINEASM = 23,       // INLINEASM:     [sideeffect|alignstack|
+                                 //                 asmdialect,asmstr,conststr]
+  CST_CODE_CE_GEP_WITH_INRANGE_INDEX = 24, //      [opty, flags, n x operands]
+};
+
+/// CastOpcodes - These are values used in the bitcode files to encode which
+/// cast a CST_CODE_CE_CAST or a XXX refers to.  The values of these enums
+/// have no fixed relation to the LLVM IR enum values.  Changing these will
+/// break compatibility with old files.
+enum CastOpcodes {
+  CAST_TRUNC = 0,
+  CAST_ZEXT = 1,
+  CAST_SEXT = 2,
+  CAST_FPTOUI = 3,
+  CAST_FPTOSI = 4,
+  CAST_UITOFP = 5,
+  CAST_SITOFP = 6,
+  CAST_FPTRUNC = 7,
+  CAST_FPEXT = 8,
+  CAST_PTRTOINT = 9,
+  CAST_INTTOPTR = 10,
+  CAST_BITCAST = 11,
+  CAST_ADDRSPACECAST = 12
+};
+
+/// BinaryOpcodes - These are values used in the bitcode files to encode which
+/// binop a CST_CODE_CE_BINOP or a XXX refers to.  The values of these enums
+/// have no fixed relation to the LLVM IR enum values.  Changing these will
+/// break compatibility with old files.
+enum BinaryOpcodes {
+  BINOP_ADD = 0,
+  BINOP_SUB = 1,
+  BINOP_MUL = 2,
+  BINOP_UDIV = 3,
+  BINOP_SDIV = 4, // overloaded for FP
+  BINOP_UREM = 5,
+  BINOP_SREM = 6, // overloaded for FP
+  BINOP_SHL = 7,
+  BINOP_LSHR = 8,
+  BINOP_ASHR = 9,
+  BINOP_AND = 10,
+  BINOP_OR = 11,
+  BINOP_XOR = 12
+};
+
+/// These are values used in the bitcode files to encode AtomicRMW operations.
+/// The values of these enums have no fixed relation to the LLVM IR enum
+/// values.  Changing these will break compatibility with old files.
+enum RMWOperations {
+  RMW_XCHG = 0,
+  RMW_ADD = 1,
+  RMW_SUB = 2,
+  RMW_AND = 3,
+  RMW_NAND = 4,
+  RMW_OR = 5,
+  RMW_XOR = 6,
+  RMW_MAX = 7,
+  RMW_MIN = 8,
+  RMW_UMAX = 9,
+  RMW_UMIN = 10
+};
+
+/// OverflowingBinaryOperatorOptionalFlags - Flags for serializing
+/// OverflowingBinaryOperator's SubclassOptionalData contents.
+enum OverflowingBinaryOperatorOptionalFlags {
+  OBO_NO_UNSIGNED_WRAP = 0,
+  OBO_NO_SIGNED_WRAP = 1
+};
+
+/// PossiblyExactOperatorOptionalFlags - Flags for serializing
+/// PossiblyExactOperator's SubclassOptionalData contents.
+enum PossiblyExactOperatorOptionalFlags { PEO_EXACT = 0 };
+
+/// Encoded AtomicOrdering values.
+enum AtomicOrderingCodes {
+  ORDERING_NOTATOMIC = 0,
+  ORDERING_UNORDERED = 1,
+  ORDERING_MONOTONIC = 2,
+  ORDERING_ACQUIRE = 3,
+  ORDERING_RELEASE = 4,
+  ORDERING_ACQREL = 5,
+  ORDERING_SEQCST = 6
+};
+
+/// Encoded SynchronizationScope values.
+enum AtomicSynchScopeCodes {
+  SYNCHSCOPE_SINGLETHREAD = 0,
+  SYNCHSCOPE_CROSSTHREAD = 1
+};
+
+/// Markers and flags for call instruction.
+enum CallMarkersFlags {
+  CALL_TAIL = 0,
+  CALL_CCONV = 1,
+  CALL_MUSTTAIL = 14,
+  CALL_EXPLICIT_TYPE = 15,
+  CALL_NOTAIL = 16,
+  CALL_FMF = 17 // Call has optional fast-math-flags.
+};
+
+// The function body block (FUNCTION_BLOCK_ID) describes function bodies.  It
+// can contain a constant block (CONSTANTS_BLOCK_ID).
+enum FunctionCodes {
+  FUNC_CODE_DECLAREBLOCKS = 1, // DECLAREBLOCKS: [n]
+
+  FUNC_CODE_INST_BINOP = 2,      // BINOP:      [opcode, ty, opval, opval]
+  FUNC_CODE_INST_CAST = 3,       // CAST:       [opcode, ty, opty, opval]
+  FUNC_CODE_INST_GEP_OLD = 4,    // GEP:        [n x operands]
+  FUNC_CODE_INST_SELECT = 5,     // SELECT:     [ty, opval, opval, opval]
+  FUNC_CODE_INST_EXTRACTELT = 6, // EXTRACTELT: [opty, opval, opval]
+  FUNC_CODE_INST_INSERTELT = 7,  // INSERTELT:  [ty, opval, opval, opval]
+  FUNC_CODE_INST_SHUFFLEVEC = 8, // SHUFFLEVEC: [ty, opval, opval, opval]
+  FUNC_CODE_INST_CMP = 9,        // CMP:        [opty, opval, opval, pred]
+
+  FUNC_CODE_INST_RET = 10,    // RET:        [opty,opval<both optional>]
+  FUNC_CODE_INST_BR = 11,     // BR:         [bb#, bb#, cond] or [bb#]
+  FUNC_CODE_INST_SWITCH = 12, // SWITCH:     [opty, op0, op1, ...]
+  FUNC_CODE_INST_INVOKE = 13, // INVOKE:     [attr, fnty, op0,op1, ...]
+  // 14 is unused.
+  FUNC_CODE_INST_UNREACHABLE = 15, // UNREACHABLE
+
+  FUNC_CODE_INST_PHI = 16, // PHI:        [ty, val0,bb0, ...]
+  // 17 is unused.
+  // 18 is unused.
+  FUNC_CODE_INST_ALLOCA = 19, // ALLOCA:     [instty, opty, op, align]
+  FUNC_CODE_INST_LOAD = 20,   // LOAD:       [opty, op, align, vol]
+  // 21 is unused.
+  // 22 is unused.
+  FUNC_CODE_INST_VAARG = 23, // VAARG:      [valistty, valist, instty]
+  // This store code encodes the pointer type, rather than the value type
+  // this is so information only available in the pointer type (e.g. address
+  // spaces) is retained.
+  FUNC_CODE_INST_STORE_OLD = 24, // STORE:      [ptrty,ptr,val, align, vol]
+  // 25 is unused.
+  FUNC_CODE_INST_EXTRACTVAL = 26, // EXTRACTVAL: [n x operands]
+  FUNC_CODE_INST_INSERTVAL = 27,  // INSERTVAL:  [n x operands]
+  // fcmp/icmp returning Int1TY or vector of Int1Ty. Same as CMP, exists to
+  // support legacy vicmp/vfcmp instructions.
+  FUNC_CODE_INST_CMP2 = 28, // CMP2:       [opty, opval, opval, pred]
+  // new select on i1 or [N x i1]
+  FUNC_CODE_INST_VSELECT = 29, // VSELECT:    [ty,opval,opval,predty,pred]
+  FUNC_CODE_INST_INBOUNDS_GEP_OLD = 30, // INBOUNDS_GEP: [n x operands]
+  FUNC_CODE_INST_INDIRECTBR = 31,       // INDIRECTBR: [opty, op0, op1, ...]
+  // 32 is unused.
+  FUNC_CODE_DEBUG_LOC_AGAIN = 33, // DEBUG_LOC_AGAIN
+
+  FUNC_CODE_INST_CALL = 34, // CALL:    [attr, cc, fnty, fnid, args...]
+
+  FUNC_CODE_DEBUG_LOC = 35,        // DEBUG_LOC:  [Line,Col,ScopeVal, IAVal]
+  FUNC_CODE_INST_FENCE = 36,       // FENCE: [ordering, synchscope]
+  FUNC_CODE_INST_CMPXCHG_OLD = 37, // CMPXCHG: [ptrty,ptr,cmp,new, align, vol,
+                                   //           ordering, synchscope]
+  FUNC_CODE_INST_ATOMICRMW = 38,   // ATOMICRMW: [ptrty,ptr,val, operation,
+                                   //             align, vol,
+                                   //             ordering, synchscope]
+  FUNC_CODE_INST_RESUME = 39,      // RESUME:     [opval]
+  FUNC_CODE_INST_LANDINGPAD_OLD =
+      40,                         // LANDINGPAD: [ty,val,val,num,id0,val0...]
+  FUNC_CODE_INST_LOADATOMIC = 41, // LOAD: [opty, op, align, vol,
+                                  //        ordering, synchscope]
+  FUNC_CODE_INST_STOREATOMIC_OLD = 42, // STORE: [ptrty,ptr,val, align, vol
+                                       //         ordering, synchscope]
+  FUNC_CODE_INST_GEP = 43,             // GEP:  [inbounds, n x operands]
+  FUNC_CODE_INST_STORE = 44,       // STORE: [ptrty,ptr,valty,val, align, vol]
+  FUNC_CODE_INST_STOREATOMIC = 45, // STORE: [ptrty,ptr,val, align, vol
+  FUNC_CODE_INST_CMPXCHG = 46,     // CMPXCHG: [ptrty,ptr,valty,cmp,new, align,
+                                   //           vol,ordering,synchscope]
+  FUNC_CODE_INST_LANDINGPAD = 47,  // LANDINGPAD: [ty,val,num,id0,val0...]
+  FUNC_CODE_INST_CLEANUPRET = 48,  // CLEANUPRET: [val] or [val,bb#]
+  FUNC_CODE_INST_CATCHRET = 49,    // CATCHRET: [val,bb#]
+  FUNC_CODE_INST_CATCHPAD = 50,    // CATCHPAD: [bb#,bb#,num,args...]
+  FUNC_CODE_INST_CLEANUPPAD = 51,  // CLEANUPPAD: [num,args...]
+  FUNC_CODE_INST_CATCHSWITCH =
+      52, // CATCHSWITCH: [num,args...] or [num,args...,bb]
+  // 53 is unused.
+  // 54 is unused.
+  FUNC_CODE_OPERAND_BUNDLE = 55, // OPERAND_BUNDLE: [tag#, value...]
+};
+
+enum UseListCodes {
+  USELIST_CODE_DEFAULT = 1, // DEFAULT: [index..., value-id]
+  USELIST_CODE_BB = 2       // BB: [index..., bb-id]
+};
+
+enum AttributeKindCodes {
+  // = 0 is unused
+  ATTR_KIND_ALIGNMENT = 1,
+  ATTR_KIND_ALWAYS_INLINE = 2,
+  ATTR_KIND_BY_VAL = 3,
+  ATTR_KIND_INLINE_HINT = 4,
+  ATTR_KIND_IN_REG = 5,
+  ATTR_KIND_MIN_SIZE = 6,
+  ATTR_KIND_NAKED = 7,
+  ATTR_KIND_NEST = 8,
+  ATTR_KIND_NO_ALIAS = 9,
+  ATTR_KIND_NO_BUILTIN = 10,
+  ATTR_KIND_NO_CAPTURE = 11,
+  ATTR_KIND_NO_DUPLICATE = 12,
+  ATTR_KIND_NO_IMPLICIT_FLOAT = 13,
+  ATTR_KIND_NO_INLINE = 14,
+  ATTR_KIND_NON_LAZY_BIND = 15,
+  ATTR_KIND_NO_RED_ZONE = 16,
+  ATTR_KIND_NO_RETURN = 17,
+  ATTR_KIND_NO_UNWIND = 18,
+  ATTR_KIND_OPTIMIZE_FOR_SIZE = 19,
+  ATTR_KIND_READ_NONE = 20,
+  ATTR_KIND_READ_ONLY = 21,
+  ATTR_KIND_RETURNED = 22,
+  ATTR_KIND_RETURNS_TWICE = 23,
+  ATTR_KIND_S_EXT = 24,
+  ATTR_KIND_STACK_ALIGNMENT = 25,
+  ATTR_KIND_STACK_PROTECT = 26,
+  ATTR_KIND_STACK_PROTECT_REQ = 27,
+  ATTR_KIND_STACK_PROTECT_STRONG = 28,
+  ATTR_KIND_STRUCT_RET = 29,
+  ATTR_KIND_SANITIZE_ADDRESS = 30,
+  ATTR_KIND_SANITIZE_THREAD = 31,
+  ATTR_KIND_SANITIZE_MEMORY = 32,
+  ATTR_KIND_UW_TABLE = 33,
+  ATTR_KIND_Z_EXT = 34,
+  ATTR_KIND_BUILTIN = 35,
+  ATTR_KIND_COLD = 36,
+  ATTR_KIND_OPTIMIZE_NONE = 37,
+  ATTR_KIND_IN_ALLOCA = 38,
+  ATTR_KIND_NON_NULL = 39,
+  ATTR_KIND_JUMP_TABLE = 40,
+  ATTR_KIND_DEREFERENCEABLE = 41,
+  ATTR_KIND_DEREFERENCEABLE_OR_NULL = 42,
+  ATTR_KIND_CONVERGENT = 43,
+  ATTR_KIND_SAFESTACK = 44,
+  ATTR_KIND_ARGMEMONLY = 45,
+  ATTR_KIND_SWIFT_SELF = 46,
+  ATTR_KIND_SWIFT_ERROR = 47,
+  ATTR_KIND_NO_RECURSE = 48,
+  ATTR_KIND_INACCESSIBLEMEM_ONLY = 49,
+  ATTR_KIND_INACCESSIBLEMEM_OR_ARGMEMONLY = 50,
+  ATTR_KIND_ALLOC_SIZE = 51,
+  ATTR_KIND_WRITEONLY = 52,
+
+  // VISC Attributes
+  ATTR_KIND_IN = 53,
+  ATTR_KIND_OUT = 54,
+  ATTR_KIND_INOUT = 55
+};
+
+enum ComdatSelectionKindCodes {
+  COMDAT_SELECTION_KIND_ANY = 1,
+  COMDAT_SELECTION_KIND_EXACT_MATCH = 2,
+  COMDAT_SELECTION_KIND_LARGEST = 3,
+  COMDAT_SELECTION_KIND_NO_DUPLICATES = 4,
+  COMDAT_SELECTION_KIND_SAME_SIZE = 5,
+};
+
+} // End bitc namespace
+} // End llvm namespace
+
+#endif
diff --git a/llvm/tools/hpvm/llvm_patches/include/Bitcode/LLVMBitCodes.h.patch b/llvm/tools/hpvm/llvm_patches/include/Bitcode/LLVMBitCodes.h.patch
new file mode 100644
index 0000000000..8493994ff1
--- /dev/null
+++ b/llvm/tools/hpvm/llvm_patches/include/Bitcode/LLVMBitCodes.h.patch
@@ -0,0 +1,16 @@
+--- ../../../include/llvm/Bitcode/LLVMBitCodes.h	2019-12-29 18:23:33.020718342 -0600
++++ include/Bitcode/LLVMBitCodes.h	2019-12-29 18:49:23.479634563 -0600
+@@ -522,7 +522,12 @@
+   ATTR_KIND_INACCESSIBLEMEM_ONLY = 49,
+   ATTR_KIND_INACCESSIBLEMEM_OR_ARGMEMONLY = 50,
+   ATTR_KIND_ALLOC_SIZE = 51,
+-  ATTR_KIND_WRITEONLY = 52
++  ATTR_KIND_WRITEONLY = 52,
++
++  // VISC Attributes
++  ATTR_KIND_IN = 53,
++  ATTR_KIND_OUT = 54,
++  ATTR_KIND_INOUT = 55
+ };
+ 
+ enum ComdatSelectionKindCodes {
diff --git a/llvm/tools/hpvm/llvm_patches/include/IR/Attributes.td b/llvm/tools/hpvm/llvm_patches/include/IR/Attributes.td
new file mode 100644
index 0000000000..81e6605282
--- /dev/null
+++ b/llvm/tools/hpvm/llvm_patches/include/IR/Attributes.td
@@ -0,0 +1,223 @@
+/// Attribute base class.
+class Attr<string S> {
+  // String representation of this attribute in the IR.
+  string AttrString = S;
+}
+
+/// Enum attribute.
+class EnumAttr<string S> : Attr<S>;
+
+/// StringBool attribute.
+class StrBoolAttr<string S> : Attr<S>;
+
+/// Target-independent enum attributes.
+
+/// Alignment of parameter (5 bits) stored as log2 of alignment with +1 bias.
+/// 0 means unaligned (different from align(1)).
+def Alignment : EnumAttr<"align">;
+
+/// The result of the function is guaranteed to point to a number of bytes that
+/// we can determine if we know the value of the function's arguments.
+def AllocSize : EnumAttr<"allocsize">;
+
+/// inline=always.
+def AlwaysInline : EnumAttr<"alwaysinline">;
+
+/// Function can access memory only using pointers based on its arguments.
+def ArgMemOnly : EnumAttr<"argmemonly">;
+
+/// Callee is recognized as a builtin, despite nobuiltin attribute on its
+/// declaration.
+def Builtin : EnumAttr<"builtin">;
+
+/// Pass structure by value.
+def ByVal : EnumAttr<"byval">;
+
+/// Marks function as being in a cold path.
+def Cold : EnumAttr<"cold">;
+
+/// Can only be moved to control-equivalent blocks.
+def Convergent : EnumAttr<"convergent">;
+
+/// Pointer is known to be dereferenceable.
+def Dereferenceable : EnumAttr<"dereferenceable">;
+
+/// Pointer is either null or dereferenceable.
+def DereferenceableOrNull : EnumAttr<"dereferenceable_or_null">;
+
+/// Function may only access memory that is inaccessible from IR.
+def InaccessibleMemOnly : EnumAttr<"inaccessiblememonly">;
+
+/// Function may only access memory that is either inaccessible from the IR,
+/// or pointed to by its pointer arguments.
+def InaccessibleMemOrArgMemOnly : EnumAttr<"inaccessiblemem_or_argmemonly">;
+
+/// Pass structure in an alloca.
+def InAlloca : EnumAttr<"inalloca">;
+
+/// Source said inlining was desirable.
+def InlineHint : EnumAttr<"inlinehint">;
+
+/// Force argument to be passed in register.
+def InReg : EnumAttr<"inreg">;
+
+/// Build jump-instruction tables and replace refs.
+def JumpTable : EnumAttr<"jumptable">;
+
+/// Function must be optimized for size first.
+def MinSize : EnumAttr<"minsize">;
+
+/// Naked function.
+def Naked : EnumAttr<"naked">;
+
+/// Nested function static chain.
+def Nest : EnumAttr<"nest">;
+
+/// Considered to not alias after call.
+def NoAlias : EnumAttr<"noalias">;
+
+/// Callee isn't recognized as a builtin.
+def NoBuiltin : EnumAttr<"nobuiltin">;
+
+/// Function creates no aliases of pointer.
+def NoCapture : EnumAttr<"nocapture">;
+
+/// Call cannot be duplicated.
+def NoDuplicate : EnumAttr<"noduplicate">;
+
+/// Disable implicit floating point insts.
+def NoImplicitFloat : EnumAttr<"noimplicitfloat">;
+
+/// inline=never.
+def NoInline : EnumAttr<"noinline">;
+
+/// Function is called early and/or often, so lazy binding isn't worthwhile.
+def NonLazyBind : EnumAttr<"nonlazybind">;
+
+/// Pointer is known to be not null.
+def NonNull : EnumAttr<"nonnull">;
+
+/// The function does not recurse.
+def NoRecurse : EnumAttr<"norecurse">;
+
+/// Disable redzone.
+def NoRedZone : EnumAttr<"noredzone">;
+
+/// Mark the function as not returning.
+def NoReturn : EnumAttr<"noreturn">;
+
+/// Function doesn't unwind stack.
+def NoUnwind : EnumAttr<"nounwind">;
+
+/// opt_size.
+def OptimizeForSize : EnumAttr<"optsize">;
+
+/// Function must not be optimized.
+def OptimizeNone : EnumAttr<"optnone">;
+
+/// Function does not access memory.
+def ReadNone : EnumAttr<"readnone">;
+
+/// Function only reads from memory.
+def ReadOnly : EnumAttr<"readonly">;
+
+/// Return value is always equal to this argument.
+def Returned : EnumAttr<"returned">;
+
+/// Function can return twice.
+def ReturnsTwice : EnumAttr<"returns_twice">;
+
+/// Safe Stack protection.
+def SafeStack : EnumAttr<"safestack">;
+
+/// Sign extended before/after call.
+def SExt : EnumAttr<"signext">;
+
+/// Alignment of stack for function (3 bits)  stored as log2 of alignment with
+/// +1 bias 0 means unaligned (different from alignstack=(1)).
+def StackAlignment : EnumAttr<"alignstack">;
+
+/// Stack protection.
+def StackProtect : EnumAttr<"ssp">;
+
+/// Stack protection required.
+def StackProtectReq : EnumAttr<"sspreq">;
+
+/// Strong Stack protection.
+def StackProtectStrong : EnumAttr<"sspstrong">;
+
+/// Hidden pointer to structure to return.
+def StructRet : EnumAttr<"sret">;
+
+/// AddressSanitizer is on.
+def SanitizeAddress : EnumAttr<"sanitize_address">;
+
+/// ThreadSanitizer is on.
+def SanitizeThread : EnumAttr<"sanitize_thread">;
+
+/// MemorySanitizer is on.
+def SanitizeMemory : EnumAttr<"sanitize_memory">;
+
+/// Argument is swift error.
+def SwiftError : EnumAttr<"swifterror">;
+
+/// Argument is swift self/context.
+def SwiftSelf : EnumAttr<"swiftself">;
+
+/// Function must be in a unwind table.
+def UWTable : EnumAttr<"uwtable">;
+
+/// Function only writes to memory.
+def WriteOnly : EnumAttr<"writeonly">;
+
+/// Zero extended before/after call.
+def ZExt : EnumAttr<"zeroext">;
+
+/// VISC Attributes
+/// Pointer to read only memory
+def In : EnumAttr<"in">;
+
+/// Pointer to write only memory
+def Out : EnumAttr<"out">;
+
+/// Pointer to read/write memory
+def InOut : EnumAttr<"inout">;
+
+/// Target-independent string attributes.
+def LessPreciseFPMAD : StrBoolAttr<"less-precise-fpmad">;
+def NoInfsFPMath : StrBoolAttr<"no-infs-fp-math">;
+def NoNansFPMath : StrBoolAttr<"no-nans-fp-math">;
+def UnsafeFPMath : StrBoolAttr<"unsafe-fp-math">;
+def NoJumpTables : StrBoolAttr<"no-jump-tables">;
+
+class CompatRule<string F> {
+  // The name of the function called to check the attribute of the caller and
+  // callee and decide whether inlining should be allowed. The function's
+  // signature must match "bool(const Function&, const Function &)", where the
+  // first parameter is the reference to the caller and the second parameter is
+  // the reference to the callee. It must return false if the attributes of the
+  // caller and callee are incompatible, and true otherwise.
+  string CompatFunc = F;
+}
+
+def : CompatRule<"isEqual<SanitizeAddressAttr>">;
+def : CompatRule<"isEqual<SanitizeThreadAttr>">;
+def : CompatRule<"isEqual<SanitizeMemoryAttr>">;
+def : CompatRule<"isEqual<SafeStackAttr>">;
+
+class MergeRule<string F> {
+  // The name of the function called to merge the attributes of the caller and
+  // callee. The function's signature must match
+  // "void(Function&, const Function &)", where the first parameter is the
+  // reference to the caller and the second parameter is the reference to the
+  // callee.
+  string MergeFunc = F;
+}
+
+def : MergeRule<"setAND<LessPreciseFPMADAttr>">;
+def : MergeRule<"setAND<NoInfsFPMathAttr>">;
+def : MergeRule<"setAND<NoNansFPMathAttr>">;
+def : MergeRule<"setAND<UnsafeFPMathAttr>">;
+def : MergeRule<"setOR<NoImplicitFloatAttr>">;
+def : MergeRule<"setOR<NoJumpTablesAttr>">;
+def : MergeRule<"adjustCallerSSPLevel">;
diff --git a/llvm/tools/hpvm/llvm_patches/include/IR/Attributes.td.patch b/llvm/tools/hpvm/llvm_patches/include/IR/Attributes.td.patch
new file mode 100644
index 0000000000..429bf7b215
--- /dev/null
+++ b/llvm/tools/hpvm/llvm_patches/include/IR/Attributes.td.patch
@@ -0,0 +1,19 @@
+--- ../../../include/llvm/IR/Attributes.td	2019-12-29 18:23:33.793781744 -0600
++++ include/IR/Attributes.td	2019-12-30 00:56:38.540423304 -0600
+@@ -173,6 +173,16 @@
+ /// Zero extended before/after call.
+ def ZExt : EnumAttr<"zeroext">;
+ 
++/// VISC Attributes
++/// Pointer to read only memory
++def In : EnumAttr<"in">;
++
++/// Pointer to write only memory
++def Out : EnumAttr<"out">;
++
++/// Pointer to read/write memory
++def InOut : EnumAttr<"inout">;
++
+ /// Target-independent string attributes.
+ def LessPreciseFPMAD : StrBoolAttr<"less-precise-fpmad">;
+ def NoInfsFPMath : StrBoolAttr<"no-infs-fp-math">;
diff --git a/llvm/tools/hpvm/llvm_patches/include/IR/Intrinsics.td b/llvm/tools/hpvm/llvm_patches/include/IR/Intrinsics.td
new file mode 100644
index 0000000000..eed22c81b9
--- /dev/null
+++ b/llvm/tools/hpvm/llvm_patches/include/IR/Intrinsics.td
@@ -0,0 +1,764 @@
+//===- Intrinsics.td - Defines all LLVM intrinsics ---------*- tablegen -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines properties of all LLVM intrinsics.
+//
+//===----------------------------------------------------------------------===//
+
+include "llvm/CodeGen/ValueTypes.td"
+
+//===----------------------------------------------------------------------===//
+//  Properties we keep track of for intrinsics.
+//===----------------------------------------------------------------------===//
+
+class IntrinsicProperty;
+
+// Intr*Mem - Memory properties.  If no property is set, the worst case
+// is assumed (it may read and write any memory it can get access to and it may
+// have other side effects).
+
+// IntrNoMem - The intrinsic does not access memory or have any other side
+// effects.  It may be CSE'd deleted if dead, etc.
+def IntrNoMem : IntrinsicProperty;
+
+// IntrReadMem - This intrinsic only reads from memory. It does not write to
+// memory and has no other side effects. Therefore, it cannot be moved across
+// potentially aliasing stores. However, it can be reordered otherwise and can
+// be deleted if dead.
+def IntrReadMem : IntrinsicProperty;
+
+// IntrWriteMem - This intrinsic only writes to memory, but does not read from
+// memory, and has no other side effects. This means dead stores before calls
+// to this intrinsics may be removed.
+def IntrWriteMem : IntrinsicProperty;
+
+// IntrArgMemOnly - This intrinsic only accesses memory that its pointer-typed
+// argument(s) points to, but may access an unspecified amount. Other than
+// reads from and (possibly volatile) writes to memory, it has no side effects.
+def IntrArgMemOnly : IntrinsicProperty;
+
+// IntrInaccessibleMemOnly -- This intrinsic only accesses memory that is not
+// accessible by the module being compiled. This is a weaker form of IntrNoMem.
+def IntrInaccessibleMemOnly : IntrinsicProperty;
+
+// IntrInaccessibleMemOrArgMemOnly -- This intrinsic only accesses memory that
+// its pointer-typed arguments point to or memory that is not accessible
+// by the module being compiled. This is a weaker form of IntrArgMemOnly.
+def IntrInaccessibleMemOrArgMemOnly : IntrinsicProperty;
+
+// Commutative - This intrinsic is commutative: X op Y == Y op X.
+def Commutative : IntrinsicProperty;
+
+// Throws - This intrinsic can throw.
+def Throws : IntrinsicProperty;
+
+// NoCapture - The specified argument pointer is not captured by the intrinsic.
+class NoCapture<int argNo> : IntrinsicProperty {
+  int ArgNo = argNo;
+}
+
+// Returned - The specified argument is always the return value of the
+// intrinsic.
+class Returned<int argNo> : IntrinsicProperty {
+  int ArgNo = argNo;
+}
+
+// ReadOnly - The specified argument pointer is not written to through the
+// pointer by the intrinsic.
+class ReadOnly<int argNo> : IntrinsicProperty {
+  int ArgNo = argNo;
+}
+
+// WriteOnly - The intrinsic does not read memory through the specified
+// argument pointer.
+class WriteOnly<int argNo> : IntrinsicProperty {
+  int ArgNo = argNo;
+}
+
+// ReadNone - The specified argument pointer is not dereferenced by the
+// intrinsic.
+class ReadNone<int argNo> : IntrinsicProperty {
+  int ArgNo = argNo;
+}
+
+def IntrNoReturn : IntrinsicProperty;
+
+// IntrNoduplicate - Calls to this intrinsic cannot be duplicated.
+// Parallels the noduplicate attribute on LLVM IR functions.
+def IntrNoDuplicate : IntrinsicProperty;
+
+// IntrConvergent - Calls to this intrinsic are convergent and may not be made
+// control-dependent on any additional values.
+// Parallels the convergent attribute on LLVM IR functions.
+def IntrConvergent : IntrinsicProperty;
+
+//===----------------------------------------------------------------------===//
+// Types used by intrinsics.
+//===----------------------------------------------------------------------===//
+
+class LLVMType<ValueType vt> {
+  ValueType VT = vt;
+}
+
+class LLVMQualPointerType<LLVMType elty, int addrspace>
+  : LLVMType<iPTR>{
+  LLVMType ElTy = elty;
+  int AddrSpace = addrspace;
+}
+
+class LLVMPointerType<LLVMType elty>
+  : LLVMQualPointerType<elty, 0>;
+
+class LLVMAnyPointerType<LLVMType elty>
+  : LLVMType<iPTRAny>{
+  LLVMType ElTy = elty;
+}
+
+// Match the type of another intrinsic parameter.  Number is an index into the
+// list of overloaded types for the intrinsic, excluding all the fixed types.
+// The Number value must refer to a previously listed type.  For example:
+//   Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_anyfloat_ty, LLVMMatchType<0>]>
+// has two overloaded types, the 2nd and 3rd arguments.  LLVMMatchType<0>
+// refers to the first overloaded type, which is the 2nd argument.
+class LLVMMatchType<int num>
+  : LLVMType<OtherVT>{
+  int Number = num;
+}
+
+// Match the type of another intrinsic parameter that is expected to be based on
+// an integral type (i.e. either iN or <N x iM>), but change the scalar size to
+// be twice as wide or half as wide as the other type.  This is only useful when
+// the intrinsic is overloaded, so the matched type should be declared as iAny.
+class LLVMExtendedType<int num> : LLVMMatchType<num>;
+class LLVMTruncatedType<int num> : LLVMMatchType<num>;
+class LLVMVectorSameWidth<int num, LLVMType elty>
+  : LLVMMatchType<num> {
+  ValueType ElTy = elty.VT;
+}
+class LLVMPointerTo<int num> : LLVMMatchType<num>;
+class LLVMPointerToElt<int num> : LLVMMatchType<num>;
+class LLVMVectorOfPointersToElt<int num> : LLVMMatchType<num>;
+
+// Match the type of another intrinsic parameter that is expected to be a
+// vector type, but change the element count to be half as many
+class LLVMHalfElementsVectorType<int num> : LLVMMatchType<num>;
+
+def llvm_void_ty       : LLVMType<isVoid>;
+def llvm_any_ty        : LLVMType<Any>;
+def llvm_anyint_ty     : LLVMType<iAny>;
+def llvm_anyfloat_ty   : LLVMType<fAny>;
+def llvm_anyvector_ty  : LLVMType<vAny>;
+def llvm_i1_ty         : LLVMType<i1>;
+def llvm_i8_ty         : LLVMType<i8>;
+def llvm_i16_ty        : LLVMType<i16>;
+def llvm_i32_ty        : LLVMType<i32>;
+def llvm_i64_ty        : LLVMType<i64>;
+def llvm_half_ty       : LLVMType<f16>;
+def llvm_float_ty      : LLVMType<f32>;
+def llvm_double_ty     : LLVMType<f64>;
+def llvm_f80_ty        : LLVMType<f80>;
+def llvm_f128_ty       : LLVMType<f128>;
+def llvm_ppcf128_ty    : LLVMType<ppcf128>;
+def llvm_ptr_ty        : LLVMPointerType<llvm_i8_ty>;             // i8*
+def llvm_ptrptr_ty     : LLVMPointerType<llvm_ptr_ty>;            // i8**
+def llvm_anyptr_ty     : LLVMAnyPointerType<llvm_i8_ty>;          // (space)i8*
+def llvm_empty_ty      : LLVMType<OtherVT>;                       // { }
+def llvm_descriptor_ty : LLVMPointerType<llvm_empty_ty>;          // { }*
+def llvm_metadata_ty   : LLVMType<MetadataVT>;                    // !{...}
+def llvm_token_ty      : LLVMType<token>;                         // token
+
+def llvm_x86mmx_ty     : LLVMType<x86mmx>;
+def llvm_ptrx86mmx_ty  : LLVMPointerType<llvm_x86mmx_ty>;         // <1 x i64>*
+
+def llvm_v2i1_ty       : LLVMType<v2i1>;     //   2 x i1
+def llvm_v4i1_ty       : LLVMType<v4i1>;     //   4 x i1
+def llvm_v8i1_ty       : LLVMType<v8i1>;     //   8 x i1
+def llvm_v16i1_ty      : LLVMType<v16i1>;    //  16 x i1
+def llvm_v32i1_ty      : LLVMType<v32i1>;    //  32 x i1
+def llvm_v64i1_ty      : LLVMType<v64i1>;    //  64 x i1
+def llvm_v512i1_ty     : LLVMType<v512i1>;   // 512 x i1
+def llvm_v1024i1_ty    : LLVMType<v1024i1>;  //1024 x i1
+
+def llvm_v1i8_ty       : LLVMType<v1i8>;     //  1 x i8
+def llvm_v2i8_ty       : LLVMType<v2i8>;     //  2 x i8
+def llvm_v4i8_ty       : LLVMType<v4i8>;     //  4 x i8
+def llvm_v8i8_ty       : LLVMType<v8i8>;     //  8 x i8
+def llvm_v16i8_ty      : LLVMType<v16i8>;    // 16 x i8
+def llvm_v32i8_ty      : LLVMType<v32i8>;    // 32 x i8
+def llvm_v64i8_ty      : LLVMType<v64i8>;    // 64 x i8
+def llvm_v128i8_ty     : LLVMType<v128i8>;   //128 x i8
+def llvm_v256i8_ty     : LLVMType<v256i8>;   //256 x i8
+
+def llvm_v1i16_ty      : LLVMType<v1i16>;    //  1 x i16
+def llvm_v2i16_ty      : LLVMType<v2i16>;    //  2 x i16
+def llvm_v4i16_ty      : LLVMType<v4i16>;    //  4 x i16
+def llvm_v8i16_ty      : LLVMType<v8i16>;    //  8 x i16
+def llvm_v16i16_ty     : LLVMType<v16i16>;   // 16 x i16
+def llvm_v32i16_ty     : LLVMType<v32i16>;   // 32 x i16
+def llvm_v64i16_ty     : LLVMType<v64i16>;   // 64 x i16
+def llvm_v128i16_ty    : LLVMType<v128i16>;  //128 x i16
+
+def llvm_v1i32_ty      : LLVMType<v1i32>;    //  1 x i32
+def llvm_v2i32_ty      : LLVMType<v2i32>;    //  2 x i32
+def llvm_v4i32_ty      : LLVMType<v4i32>;    //  4 x i32
+def llvm_v8i32_ty      : LLVMType<v8i32>;    //  8 x i32
+def llvm_v16i32_ty     : LLVMType<v16i32>;   // 16 x i32
+def llvm_v32i32_ty     : LLVMType<v32i32>;   // 32 x i32
+def llvm_v64i32_ty     : LLVMType<v64i32>;   // 64 x i32
+
+def llvm_v1i64_ty      : LLVMType<v1i64>;    //  1 x i64
+def llvm_v2i64_ty      : LLVMType<v2i64>;    //  2 x i64
+def llvm_v4i64_ty      : LLVMType<v4i64>;    //  4 x i64
+def llvm_v8i64_ty      : LLVMType<v8i64>;    //  8 x i64
+def llvm_v16i64_ty     : LLVMType<v16i64>;   // 16 x i64
+def llvm_v32i64_ty     : LLVMType<v32i64>;   // 32 x i64
+
+def llvm_v1i128_ty     : LLVMType<v1i128>;   //  1 x i128
+
+def llvm_v2f16_ty      : LLVMType<v2f16>;    //  2 x half (__fp16)
+def llvm_v4f16_ty      : LLVMType<v4f16>;    //  4 x half (__fp16)
+def llvm_v8f16_ty      : LLVMType<v8f16>;    //  8 x half (__fp16)
+def llvm_v1f32_ty      : LLVMType<v1f32>;    //  1 x float
+def llvm_v2f32_ty      : LLVMType<v2f32>;    //  2 x float
+def llvm_v4f32_ty      : LLVMType<v4f32>;    //  4 x float
+def llvm_v8f32_ty      : LLVMType<v8f32>;    //  8 x float
+def llvm_v16f32_ty     : LLVMType<v16f32>;   // 16 x float
+def llvm_v1f64_ty      : LLVMType<v1f64>;    //  1 x double
+def llvm_v2f64_ty      : LLVMType<v2f64>;    //  2 x double
+def llvm_v4f64_ty      : LLVMType<v4f64>;    //  4 x double
+def llvm_v8f64_ty      : LLVMType<v8f64>;    //  8 x double
+
+def llvm_vararg_ty     : LLVMType<isVoid>;   // this means vararg here
+
+
+//===----------------------------------------------------------------------===//
+// Intrinsic Definitions.
+//===----------------------------------------------------------------------===//
+
+// Intrinsic class - This is used to define one LLVM intrinsic.  The name of the
+// intrinsic definition should start with "int_", then match the LLVM intrinsic
+// name with the "llvm." prefix removed, and all "."s turned into "_"s.  For
+// example, llvm.bswap.i16 -> int_bswap_i16.
+//
+//  * RetTypes is a list containing the return types expected for the
+//    intrinsic.
+//  * ParamTypes is a list containing the parameter types expected for the
+//    intrinsic.
+//  * Properties can be set to describe the behavior of the intrinsic.
+//
+class SDPatternOperator;
+class Intrinsic<list<LLVMType> ret_types,
+                list<LLVMType> param_types = [],
+                list<IntrinsicProperty> properties = [],
+                string name = ""> : SDPatternOperator {
+  string LLVMName = name;
+  string TargetPrefix = "";   // Set to a prefix for target-specific intrinsics.
+  list<LLVMType> RetTypes = ret_types;
+  list<LLVMType> ParamTypes = param_types;
+  list<IntrinsicProperty> IntrProperties = properties;
+
+  bit isTarget = 0;
+}
+
+/// GCCBuiltin - If this intrinsic exactly corresponds to a GCC builtin, this
+/// specifies the name of the builtin.  This provides automatic CBE and CFE
+/// support.
+class GCCBuiltin<string name> {
+  string GCCBuiltinName = name;
+}
+
+class MSBuiltin<string name> {
+  string MSBuiltinName = name;
+}
+
+
+//===--------------- Variable Argument Handling Intrinsics ----------------===//
+//
+
+def int_vastart : Intrinsic<[], [llvm_ptr_ty], [], "llvm.va_start">;
+def int_vacopy  : Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty], [],
+                            "llvm.va_copy">;
+def int_vaend   : Intrinsic<[], [llvm_ptr_ty], [], "llvm.va_end">;
+
+//===------------------- Garbage Collection Intrinsics --------------------===//
+//
+def int_gcroot  : Intrinsic<[],
+                            [llvm_ptrptr_ty, llvm_ptr_ty]>;
+def int_gcread  : Intrinsic<[llvm_ptr_ty],
+                            [llvm_ptr_ty, llvm_ptrptr_ty],
+                            [IntrReadMem, IntrArgMemOnly]>;
+def int_gcwrite : Intrinsic<[],
+                            [llvm_ptr_ty, llvm_ptr_ty, llvm_ptrptr_ty],
+                            [IntrArgMemOnly, NoCapture<1>, NoCapture<2>]>;
+
+//===--------------------- Code Generator Intrinsics ----------------------===//
+//
+def int_returnaddress : Intrinsic<[llvm_ptr_ty], [llvm_i32_ty], [IntrNoMem]>;
+def int_addressofreturnaddress : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
+def int_frameaddress  : Intrinsic<[llvm_ptr_ty], [llvm_i32_ty], [IntrNoMem]>;
+def int_read_register  : Intrinsic<[llvm_anyint_ty], [llvm_metadata_ty],
+                                   [IntrReadMem], "llvm.read_register">;
+def int_write_register : Intrinsic<[], [llvm_metadata_ty, llvm_anyint_ty],
+                                   [], "llvm.write_register">;
+
+// Gets the address of the local variable area. This is typically a copy of the
+// stack, frame, or base pointer depending on the type of prologue.
+def int_localaddress : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
+
+// Escapes local variables to allow access from other functions.
+def int_localescape : Intrinsic<[], [llvm_vararg_ty]>;
+
+// Given a function and the localaddress of a parent frame, returns a pointer
+// to an escaped allocation indicated by the index.
+def int_localrecover : Intrinsic<[llvm_ptr_ty],
+                                 [llvm_ptr_ty, llvm_ptr_ty, llvm_i32_ty],
+                                 [IntrNoMem]>;
+// Note: we treat stacksave/stackrestore as writemem because we don't otherwise
+// model their dependencies on allocas.
+def int_stacksave     : Intrinsic<[llvm_ptr_ty]>,
+                        GCCBuiltin<"__builtin_stack_save">;
+def int_stackrestore  : Intrinsic<[], [llvm_ptr_ty]>,
+                        GCCBuiltin<"__builtin_stack_restore">;
+
+def int_get_dynamic_area_offset : Intrinsic<[llvm_anyint_ty]>;
+
+def int_thread_pointer : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>,
+                         GCCBuiltin<"__builtin_thread_pointer">;
+
+// IntrArgMemOnly is more pessimistic than strictly necessary for prefetch,
+// however it does conveniently prevent the prefetch from being reordered
+// with respect to nearby accesses to the same memory.
+def int_prefetch      : Intrinsic<[],
+                                  [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty,
+                                   llvm_i32_ty],
+                                  [IntrArgMemOnly, NoCapture<0>]>;
+def int_pcmarker      : Intrinsic<[], [llvm_i32_ty]>;
+
+def int_readcyclecounter : Intrinsic<[llvm_i64_ty]>;
+
+// The assume intrinsic is marked as arbitrarily writing so that proper
+// control dependencies will be maintained.
+def int_assume        : Intrinsic<[], [llvm_i1_ty], []>;
+
+// Stack Protector Intrinsic - The stackprotector intrinsic writes the stack
+// guard to the correct place on the stack frame.
+def int_stackprotector : Intrinsic<[], [llvm_ptr_ty, llvm_ptrptr_ty], []>;
+def int_stackguard : Intrinsic<[llvm_ptr_ty], [], []>;
+
+// A counter increment for instrumentation based profiling.
+def int_instrprof_increment : Intrinsic<[],
+                                        [llvm_ptr_ty, llvm_i64_ty,
+                                         llvm_i32_ty, llvm_i32_ty],
+                                        []>;
+
+// A counter increment with step for instrumentation based profiling.
+def int_instrprof_increment_step : Intrinsic<[],
+                                        [llvm_ptr_ty, llvm_i64_ty,
+                                         llvm_i32_ty, llvm_i32_ty, llvm_i64_ty],
+                                        []>;
+
+// A call to profile runtime for value profiling of target expressions
+// through instrumentation based profiling.
+def int_instrprof_value_profile : Intrinsic<[],
+                                            [llvm_ptr_ty, llvm_i64_ty,
+                                             llvm_i64_ty, llvm_i32_ty,
+                                             llvm_i32_ty],
+                                            []>;
+
+//===------------------- Standard C Library Intrinsics --------------------===//
+//
+
+def int_memcpy  : Intrinsic<[],
+                             [llvm_anyptr_ty, llvm_anyptr_ty, llvm_anyint_ty,
+                              llvm_i32_ty, llvm_i1_ty],
+                            [IntrArgMemOnly, NoCapture<0>, NoCapture<1>,
+                             WriteOnly<0>, ReadOnly<1>]>;
+def int_memmove : Intrinsic<[],
+                            [llvm_anyptr_ty, llvm_anyptr_ty, llvm_anyint_ty,
+                             llvm_i32_ty, llvm_i1_ty],
+                            [IntrArgMemOnly, NoCapture<0>, NoCapture<1>,
+                             ReadOnly<1>]>;
+def int_memset  : Intrinsic<[],
+                            [llvm_anyptr_ty, llvm_i8_ty, llvm_anyint_ty,
+                             llvm_i32_ty, llvm_i1_ty],
+                            [IntrArgMemOnly, NoCapture<0>, WriteOnly<0>]>;
+
+let IntrProperties = [IntrNoMem] in {
+  def int_fma  : Intrinsic<[llvm_anyfloat_ty],
+                           [LLVMMatchType<0>, LLVMMatchType<0>,
+                            LLVMMatchType<0>]>;
+  def int_fmuladd : Intrinsic<[llvm_anyfloat_ty],
+                              [LLVMMatchType<0>, LLVMMatchType<0>,
+                               LLVMMatchType<0>]>;
+
+  // These functions do not read memory, but are sensitive to the
+  // rounding mode. LLVM purposely does not model changes to the FP
+  // environment so they can be treated as readnone.
+  def int_sqrt : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+  def int_powi : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, llvm_i32_ty]>;
+  def int_sin  : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+  def int_cos  : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+  def int_pow  : Intrinsic<[llvm_anyfloat_ty],
+                           [LLVMMatchType<0>, LLVMMatchType<0>]>;
+  def int_log  : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+  def int_log10: Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+  def int_log2 : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+  def int_exp  : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+  def int_exp2 : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+  def int_fabs : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+  def int_copysign : Intrinsic<[llvm_anyfloat_ty],
+                               [LLVMMatchType<0>, LLVMMatchType<0>]>;
+  def int_floor : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+  def int_ceil  : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+  def int_trunc : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+  def int_rint  : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+  def int_nearbyint : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+  def int_round : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>]>;
+  def int_canonicalize : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>],
+                                   [IntrNoMem]>;
+}
+
+def int_minnum : Intrinsic<[llvm_anyfloat_ty],
+  [LLVMMatchType<0>, LLVMMatchType<0>], [IntrNoMem, Commutative]
+>;
+def int_maxnum : Intrinsic<[llvm_anyfloat_ty],
+  [LLVMMatchType<0>, LLVMMatchType<0>], [IntrNoMem, Commutative]
+>;
+
+// NOTE: these are internal interfaces.
+def int_setjmp     : Intrinsic<[llvm_i32_ty],  [llvm_ptr_ty]>;
+def int_longjmp    : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty], [IntrNoReturn]>;
+def int_sigsetjmp  : Intrinsic<[llvm_i32_ty] , [llvm_ptr_ty, llvm_i32_ty]>;
+def int_siglongjmp : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty], [IntrNoReturn]>;
+
+// Internal interface for object size checking
+def int_objectsize : Intrinsic<[llvm_anyint_ty], [llvm_anyptr_ty, llvm_i1_ty],
+                               [IntrNoMem]>,
+                               GCCBuiltin<"__builtin_object_size">;
+
+//===------------------------- Expect Intrinsics --------------------------===//
+//
+def int_expect : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>,
+                                              LLVMMatchType<0>], [IntrNoMem]>;
+
+//===-------------------- Bit Manipulation Intrinsics ---------------------===//
+//
+
+// None of these intrinsics accesses memory at all.
+let IntrProperties = [IntrNoMem] in {
+  def int_bswap: Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>]>;
+  def int_ctpop: Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>]>;
+  def int_ctlz : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, llvm_i1_ty]>;
+  def int_cttz : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>, llvm_i1_ty]>;
+  def int_bitreverse : Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>]>;
+}
+
+//===------------------------ Debugger Intrinsics -------------------------===//
+//
+
+// None of these intrinsics accesses memory at all...but that doesn't mean the
+// optimizers can change them aggressively.  Special handling needed in a few
+// places.
+let IntrProperties = [IntrNoMem] in {
+  def int_dbg_declare      : Intrinsic<[],
+                                       [llvm_metadata_ty,
+                                       llvm_metadata_ty,
+                                       llvm_metadata_ty]>;
+  def int_dbg_value        : Intrinsic<[],
+                                       [llvm_metadata_ty, llvm_i64_ty,
+                                        llvm_metadata_ty,
+                                        llvm_metadata_ty]>;
+}
+
+//===------------------ Exception Handling Intrinsics----------------------===//
+//
+
+// The result of eh.typeid.for depends on the enclosing function, but inside a
+// given function it is 'const' and may be CSE'd etc.
+def int_eh_typeid_for : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty], [IntrNoMem]>;
+
+def int_eh_return_i32 : Intrinsic<[], [llvm_i32_ty, llvm_ptr_ty]>;
+def int_eh_return_i64 : Intrinsic<[], [llvm_i64_ty, llvm_ptr_ty]>;
+
+// eh.exceptionpointer returns the pointer to the exception caught by
+// the given `catchpad`.
+def int_eh_exceptionpointer : Intrinsic<[llvm_anyptr_ty], [llvm_token_ty],
+                                        [IntrNoMem]>;
+
+// Gets the exception code from a catchpad token. Only used on some platforms.
+def int_eh_exceptioncode : Intrinsic<[llvm_i32_ty], [llvm_token_ty], [IntrNoMem]>;
+
+// __builtin_unwind_init is an undocumented GCC intrinsic that causes all
+// callee-saved registers to be saved and restored (regardless of whether they
+// are used) in the calling function. It is used by libgcc_eh.
+def int_eh_unwind_init: Intrinsic<[]>,
+                        GCCBuiltin<"__builtin_unwind_init">;
+
+def int_eh_dwarf_cfa  : Intrinsic<[llvm_ptr_ty], [llvm_i32_ty]>;
+
+let IntrProperties = [IntrNoMem] in {
+  def int_eh_sjlj_lsda             : Intrinsic<[llvm_ptr_ty]>;
+  def int_eh_sjlj_callsite         : Intrinsic<[], [llvm_i32_ty]>;
+}
+def int_eh_sjlj_functioncontext : Intrinsic<[], [llvm_ptr_ty]>;
+def int_eh_sjlj_setjmp          : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty]>;
+def int_eh_sjlj_longjmp         : Intrinsic<[], [llvm_ptr_ty], [IntrNoReturn]>;
+def int_eh_sjlj_setup_dispatch  : Intrinsic<[], []>;
+
+//===---------------- Generic Variable Attribute Intrinsics----------------===//
+//
+def int_var_annotation : Intrinsic<[],
+                                   [llvm_ptr_ty, llvm_ptr_ty,
+                                    llvm_ptr_ty, llvm_i32_ty],
+                                   [], "llvm.var.annotation">;
+def int_ptr_annotation : Intrinsic<[LLVMAnyPointerType<llvm_anyint_ty>],
+                                   [LLVMMatchType<0>, llvm_ptr_ty, llvm_ptr_ty,
+                                    llvm_i32_ty],
+                                   [], "llvm.ptr.annotation">;
+def int_annotation : Intrinsic<[llvm_anyint_ty],
+                               [LLVMMatchType<0>, llvm_ptr_ty,
+                                llvm_ptr_ty, llvm_i32_ty],
+                               [], "llvm.annotation">;
+
+//===------------------------ Trampoline Intrinsics -----------------------===//
+//
+def int_init_trampoline : Intrinsic<[],
+                                    [llvm_ptr_ty, llvm_ptr_ty, llvm_ptr_ty],
+                                    [IntrArgMemOnly, NoCapture<0>]>,
+                                   GCCBuiltin<"__builtin_init_trampoline">;
+
+def int_adjust_trampoline : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty],
+                                      [IntrReadMem, IntrArgMemOnly]>,
+                                     GCCBuiltin<"__builtin_adjust_trampoline">;
+
+//===------------------------ Overflow Intrinsics -------------------------===//
+//
+
+// Expose the carry flag from add operations on two integrals.
+def int_sadd_with_overflow : Intrinsic<[llvm_anyint_ty, llvm_i1_ty],
+                                       [LLVMMatchType<0>, LLVMMatchType<0>],
+                                       [IntrNoMem]>;
+def int_uadd_with_overflow : Intrinsic<[llvm_anyint_ty, llvm_i1_ty],
+                                       [LLVMMatchType<0>, LLVMMatchType<0>],
+                                       [IntrNoMem]>;
+
+def int_ssub_with_overflow : Intrinsic<[llvm_anyint_ty, llvm_i1_ty],
+                                       [LLVMMatchType<0>, LLVMMatchType<0>],
+                                       [IntrNoMem]>;
+def int_usub_with_overflow : Intrinsic<[llvm_anyint_ty, llvm_i1_ty],
+                                       [LLVMMatchType<0>, LLVMMatchType<0>],
+                                       [IntrNoMem]>;
+
+def int_smul_with_overflow : Intrinsic<[llvm_anyint_ty, llvm_i1_ty],
+                                       [LLVMMatchType<0>, LLVMMatchType<0>],
+                                       [IntrNoMem]>;
+def int_umul_with_overflow : Intrinsic<[llvm_anyint_ty, llvm_i1_ty],
+                                       [LLVMMatchType<0>, LLVMMatchType<0>],
+                                       [IntrNoMem]>;
+
+//===------------------------- Memory Use Markers -------------------------===//
+//
+def int_lifetime_start  : Intrinsic<[],
+                                    [llvm_i64_ty, llvm_ptr_ty],
+                                    [IntrArgMemOnly, NoCapture<1>]>;
+def int_lifetime_end    : Intrinsic<[],
+                                    [llvm_i64_ty, llvm_ptr_ty],
+                                    [IntrArgMemOnly, NoCapture<1>]>;
+def int_invariant_start : Intrinsic<[llvm_descriptor_ty],
+                                    [llvm_i64_ty, llvm_anyptr_ty],
+                                    [IntrArgMemOnly, NoCapture<1>]>;
+def int_invariant_end   : Intrinsic<[],
+                                    [llvm_descriptor_ty, llvm_i64_ty,
+                                     llvm_anyptr_ty],
+                                    [IntrArgMemOnly, NoCapture<2>]>;
+
+def int_invariant_group_barrier : Intrinsic<[llvm_ptr_ty],
+                                            [llvm_ptr_ty],
+                                            [IntrNoMem]>;
+
+//===------------------------ Stackmap Intrinsics -------------------------===//
+//
+def int_experimental_stackmap : Intrinsic<[],
+                                  [llvm_i64_ty, llvm_i32_ty, llvm_vararg_ty],
+                                  [Throws]>;
+def int_experimental_patchpoint_void : Intrinsic<[],
+                                                 [llvm_i64_ty, llvm_i32_ty,
+                                                  llvm_ptr_ty, llvm_i32_ty,
+                                                  llvm_vararg_ty],
+                                                  [Throws]>;
+def int_experimental_patchpoint_i64 : Intrinsic<[llvm_i64_ty],
+                                                [llvm_i64_ty, llvm_i32_ty,
+                                                 llvm_ptr_ty, llvm_i32_ty,
+                                                 llvm_vararg_ty],
+                                                 [Throws]>;
+
+
+//===------------------------ Garbage Collection Intrinsics ---------------===//
+// These are documented in docs/Statepoint.rst
+
+def int_experimental_gc_statepoint : Intrinsic<[llvm_token_ty],
+                               [llvm_i64_ty, llvm_i32_ty,
+                                llvm_anyptr_ty, llvm_i32_ty,
+                                llvm_i32_ty, llvm_vararg_ty],
+                                [Throws]>;
+
+def int_experimental_gc_result   : Intrinsic<[llvm_any_ty], [llvm_token_ty],
+                                             [IntrReadMem]>;
+def int_experimental_gc_relocate : Intrinsic<[llvm_any_ty],
+                                [llvm_token_ty, llvm_i32_ty, llvm_i32_ty],
+                                [IntrReadMem]>;
+
+//===------------------------ Coroutine Intrinsics ---------------===//
+// These are documented in docs/Coroutines.rst
+
+// Coroutine Structure Intrinsics.
+
+def int_coro_id : Intrinsic<[llvm_token_ty], [llvm_i32_ty, llvm_ptr_ty, 
+                             llvm_ptr_ty, llvm_ptr_ty], 
+                            [IntrArgMemOnly, IntrReadMem, 
+                             ReadNone<1>, ReadOnly<2>, NoCapture<2>]>;
+def int_coro_alloc : Intrinsic<[llvm_i1_ty], [llvm_token_ty], []>;
+def int_coro_begin : Intrinsic<[llvm_ptr_ty], [llvm_token_ty, llvm_ptr_ty],
+                               [WriteOnly<1>]>;
+
+def int_coro_free : Intrinsic<[llvm_ptr_ty], [llvm_token_ty, llvm_ptr_ty], 
+                              [IntrReadMem, IntrArgMemOnly, ReadOnly<1>, 
+                               NoCapture<1>]>;
+def int_coro_end : Intrinsic<[], [llvm_ptr_ty, llvm_i1_ty], []>;
+
+def int_coro_frame : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
+def int_coro_size : Intrinsic<[llvm_anyint_ty], [], [IntrNoMem]>;
+
+def int_coro_save : Intrinsic<[llvm_token_ty], [llvm_ptr_ty], []>;
+def int_coro_suspend : Intrinsic<[llvm_i8_ty], [llvm_token_ty, llvm_i1_ty], []>;
+
+def int_coro_param : Intrinsic<[llvm_i1_ty], [llvm_ptr_ty, llvm_ptr_ty],
+                               [IntrNoMem, ReadNone<0>, ReadNone<1>]>;
+
+// Coroutine Manipulation Intrinsics.
+
+def int_coro_resume : Intrinsic<[], [llvm_ptr_ty], [Throws]>;
+def int_coro_destroy : Intrinsic<[], [llvm_ptr_ty], [Throws]>;
+def int_coro_done : Intrinsic<[llvm_i1_ty], [llvm_ptr_ty],
+                              [IntrArgMemOnly, ReadOnly<0>, NoCapture<0>]>;
+def int_coro_promise : Intrinsic<[llvm_ptr_ty],
+                                 [llvm_ptr_ty, llvm_i32_ty, llvm_i1_ty],
+                                 [IntrNoMem, NoCapture<0>]>;
+
+// Coroutine Lowering Intrinsics. Used internally by coroutine passes.
+
+def int_coro_subfn_addr : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_i8_ty],
+                                    [IntrReadMem, IntrArgMemOnly, ReadOnly<0>,
+                                     NoCapture<0>]>;
+
+///===-------------------------- Other Intrinsics --------------------------===//
+//
+def int_flt_rounds : Intrinsic<[llvm_i32_ty]>,
+                     GCCBuiltin<"__builtin_flt_rounds">;
+def int_trap : Intrinsic<[], [], [IntrNoReturn]>,
+               GCCBuiltin<"__builtin_trap">;
+def int_debugtrap : Intrinsic<[]>,
+                    GCCBuiltin<"__builtin_debugtrap">;
+
+// Support for dynamic deoptimization (or de-specialization)
+def int_experimental_deoptimize : Intrinsic<[llvm_any_ty], [llvm_vararg_ty],
+                                            [Throws]>;
+
+// Support for speculative runtime guards
+def int_experimental_guard : Intrinsic<[], [llvm_i1_ty, llvm_vararg_ty],
+                                       [Throws]>;
+
+// NOP: calls/invokes to this intrinsic are removed by codegen
+def int_donothing : Intrinsic<[], [], [IntrNoMem]>;
+
+// Intrinsics to support half precision floating point format
+let IntrProperties = [IntrNoMem] in {
+def int_convert_to_fp16   : Intrinsic<[llvm_i16_ty], [llvm_anyfloat_ty]>;
+def int_convert_from_fp16 : Intrinsic<[llvm_anyfloat_ty], [llvm_i16_ty]>;
+}
+
+// Clear cache intrinsic, default to ignore (ie. emit nothing)
+// maps to void __clear_cache() on supporting platforms
+def int_clear_cache : Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty],
+                                [], "llvm.clear_cache">;
+
+//===-------------------------- Masked Intrinsics -------------------------===//
+//
+def int_masked_store : Intrinsic<[], [llvm_anyvector_ty,
+                                      LLVMAnyPointerType<LLVMMatchType<0>>,
+                                      llvm_i32_ty,
+                                      LLVMVectorSameWidth<0, llvm_i1_ty>],
+                                 [IntrArgMemOnly]>;
+
+def int_masked_load  : Intrinsic<[llvm_anyvector_ty],
+                                 [LLVMAnyPointerType<LLVMMatchType<0>>, llvm_i32_ty,
+                                  LLVMVectorSameWidth<0, llvm_i1_ty>, LLVMMatchType<0>],
+                                 [IntrReadMem, IntrArgMemOnly]>;
+
+def int_masked_gather: Intrinsic<[llvm_anyvector_ty],
+                                 [LLVMVectorOfPointersToElt<0>, llvm_i32_ty,
+                                  LLVMVectorSameWidth<0, llvm_i1_ty>,
+                                  LLVMMatchType<0>],
+                                 [IntrReadMem]>;
+
+def int_masked_scatter: Intrinsic<[],
+                                  [llvm_anyvector_ty,
+                                   LLVMVectorOfPointersToElt<0>, llvm_i32_ty,
+                                   LLVMVectorSameWidth<0, llvm_i1_ty>]>;
+
+def int_masked_expandload: Intrinsic<[llvm_anyvector_ty],
+                                     [LLVMPointerToElt<0>,
+                                      LLVMVectorSameWidth<0, llvm_i1_ty>,
+                                      LLVMMatchType<0>],
+                                     [IntrReadMem]>;
+
+def int_masked_compressstore: Intrinsic<[],
+                                     [llvm_anyvector_ty,
+                                      LLVMPointerToElt<0>,
+                                      LLVMVectorSameWidth<0, llvm_i1_ty>],
+                                     [IntrArgMemOnly]>;
+
+// Test whether a pointer is associated with a type metadata identifier.
+def int_type_test : Intrinsic<[llvm_i1_ty], [llvm_ptr_ty, llvm_metadata_ty],
+                              [IntrNoMem]>;
+
+// Safely loads a function pointer from a virtual table pointer using type metadata.
+def int_type_checked_load : Intrinsic<[llvm_ptr_ty, llvm_i1_ty],
+                                      [llvm_ptr_ty, llvm_i32_ty, llvm_metadata_ty],
+                                      [IntrNoMem]>;
+
+def int_load_relative: Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_anyint_ty],
+                                 [IntrReadMem, IntrArgMemOnly]>;
+
+//===------ Memory intrinsics with element-wise atomicity guarantees ------===//
+//
+
+def int_memcpy_element_atomic  : Intrinsic<[],
+                                           [llvm_anyptr_ty, llvm_anyptr_ty,
+                                            llvm_i64_ty, llvm_i32_ty],
+                                 [IntrArgMemOnly, NoCapture<0>, NoCapture<1>,
+                                  WriteOnly<0>, ReadOnly<1>]>;
+
+//===----------------------------------------------------------------------===//
+// Target-specific intrinsics
+//===----------------------------------------------------------------------===//
+
+include "llvm/IR/IntrinsicsPowerPC.td"
+include "llvm/IR/IntrinsicsX86.td"
+include "llvm/IR/IntrinsicsARM.td"
+include "llvm/IR/IntrinsicsAArch64.td"
+include "llvm/IR/IntrinsicsXCore.td"
+include "llvm/IR/IntrinsicsHexagon.td"
+include "llvm/IR/IntrinsicsNVVM.td"
+include "llvm/IR/IntrinsicsMips.td"
+include "llvm/IR/IntrinsicsAMDGPU.td"
+include "llvm/IR/IntrinsicsBPF.td"
+include "llvm/IR/IntrinsicsSystemZ.td"
+include "llvm/IR/IntrinsicsWebAssembly.td"
+include "llvm/IR/IntrinsicsVISC.td"
diff --git a/llvm/tools/hpvm/llvm_patches/include/IR/Intrinsics.td.patch b/llvm/tools/hpvm/llvm_patches/include/IR/Intrinsics.td.patch
new file mode 100644
index 0000000000..cd27481ca5
--- /dev/null
+++ b/llvm/tools/hpvm/llvm_patches/include/IR/Intrinsics.td.patch
@@ -0,0 +1,7 @@
+--- ../../../include/llvm/IR/Intrinsics.td	2019-12-29 18:23:33.896790192 -0600
++++ include/IR/Intrinsics.td	2019-12-29 18:50:41.881046510 -0600
+@@ -761,3 +761,4 @@
+ include "llvm/IR/IntrinsicsBPF.td"
+ include "llvm/IR/IntrinsicsSystemZ.td"
+ include "llvm/IR/IntrinsicsWebAssembly.td"
++include "llvm/IR/IntrinsicsVISC.td"
diff --git a/llvm/tools/hpvm/llvm_patches/include/IR/IntrinsicsVISC.td b/llvm/tools/hpvm/llvm_patches/include/IR/IntrinsicsVISC.td
new file mode 100644
index 0000000000..ab22372d80
--- /dev/null
+++ b/llvm/tools/hpvm/llvm_patches/include/IR/IntrinsicsVISC.td
@@ -0,0 +1,328 @@
+//===- IntrinsicsVISC.td - Defines VISC intrinsics ---------*- tablegen -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines all of the VISC-specific intrinsics.
+//
+//===----------------------------------------------------------------------===//
+
+let TargetPrefix = "visc" in {
+  /* All intrinsics start with "llvm.visc.".
+   * Because we do not want the compiler to transform code across these
+   * intrinsics, we conservatively assume worst-case memory behavior for
+   * them.
+   */
+
+  /* Initialization intrinsic -
+   * void llvm.visc.init();
+   */
+  def int_visc_init : Intrinsic<[], [], []>;
+
+  /* Launch intrinsic - with streaming argument
+   * i8* llvm.visc.launch(i8*, ArgList*, i1);
+   */
+  def int_visc_launch : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty,
+                                  llvm_ptr_ty, llvm_i1_ty], []>;
+
+  /* Push intrinsic - push data on streaming pipeline
+   * void llvm.visc.push(i8*, ArgList*);
+   */
+  def int_visc_push : Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty], []>;
+
+  /* Pop intrinsic - pop data from streaming pipeline
+   * i8* llvm.visc.pop(i8*);
+   */
+  def int_visc_pop : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], []>;
+
+  /* Cleanup intrinsic -
+   * void llvm.visc.cleanup();
+   */
+  def int_visc_cleanup : Intrinsic<[], [], []>;
+
+  /* Wait intrinsic -
+   * void llvm.visc.wait(graphID*);
+   */
+  def int_visc_wait : Intrinsic<[], [llvm_ptr_ty], []>;
+
+  /* Track memory intrinsic -
+   * void llvm.visc.trackMemory(i8*, i64);
+   */
+  def int_visc_trackMemory : Intrinsic<[], [llvm_ptr_ty, llvm_i64_ty], []>;
+
+  /* Track memory intrinsic -
+   * void llvm.visc.untrackMemory(i8*);
+   */
+  def int_visc_untrackMemory : Intrinsic<[], [llvm_ptr_ty], []>;
+
+  /* Request memory intrinsic -
+   * void llvm.visc.requestMemory(i8*, i64);
+   */
+  def int_visc_requestMemory : Intrinsic<[], [llvm_ptr_ty, llvm_i64_ty], []>;
+
+  /* Create Node intrinsic -
+   * i8* llvm.visc.createNode(function*);
+   */
+  def int_visc_createNode : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], []>;
+
+  /* Create Node 1D array intrinsic -
+   * i8* llvm.visc.createNode1D(function*, i64);
+   */
+  def int_visc_createNode1D : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty,
+                                        llvm_i64_ty], []>;
+
+  /* Create Node 2D array intrinsic -
+   * i8* llvm.visc.createNode2D(function*, i64, i64);
+   */
+  def int_visc_createNode2D : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty,
+                                        llvm_i64_ty, llvm_i64_ty], []>;
+
+  /* Create Node 3D array intrinsic -
+   * i8* llvm.visc.createNode3D(function*, i64, i64, i64);
+   */
+  def int_visc_createNode3D : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty,
+                                        llvm_i64_ty, llvm_i64_ty, llvm_i64_ty],
+                                        []>;
+
+  /* Create dataflow edge intrinsic -
+   * i8* llvm.visc.createEdge(i8*, i8*, i1, i32, i32, i1);
+   */
+  def int_visc_createEdge : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_ptr_ty,
+                                      llvm_i1_ty, llvm_i32_ty, llvm_i32_ty,
+                                      llvm_i1_ty],
+                                      []>;
+
+  /* Create bind input intrinsic -
+   * void llvm.visc.bind.input(i8*, i32, i32, i1);
+   */
+  def int_visc_bind_input : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty,
+                                      llvm_i32_ty, llvm_i1_ty], []>;
+
+  /* Create bind output intrinsic -
+   * void llvm.visc.bind.output(i8*, i32, i32, i1);
+   */
+  def int_visc_bind_output : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty,
+                                       llvm_i32_ty, llvm_i1_ty], []>;
+
+  /* Find associated dataflow node intrinsic -
+   * i8* llvm.visc.getNode();
+   */
+  def int_visc_getNode : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
+
+  /* Find parent dataflow node intrinsic -
+   * i8* llvm.visc.getParentNode(i8*);
+   */
+  def int_visc_getParentNode : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], [IntrNoMem]>;
+
+  /* Find the number of dimensions of a dataflow node intrinsic -
+   * i32 llvm.visc.getNumDims(i8*);
+   */
+  def int_visc_getNumDims : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty], [IntrNoMem]>;
+
+  /* Find the unique identifier of a dataflow node (with respect to its
+   * parent node) in the specified dimension intrinsic -
+   */
+
+  /* i64 llvm.visc.getNodeInstanceID.[xyz](i8*);
+   */
+  def int_visc_getNodeInstanceID_x : Intrinsic<[llvm_i64_ty], [llvm_ptr_ty],
+                                               [IntrNoMem]>;
+
+  def int_visc_getNodeInstanceID_y : Intrinsic<[llvm_i64_ty], [llvm_ptr_ty],
+                                               [IntrNoMem]>;
+
+  def int_visc_getNodeInstanceID_z : Intrinsic<[llvm_i64_ty], [llvm_ptr_ty],
+                                               [IntrNoMem]>;
+
+  /* Find the number of instances of a dataflow node in the specified dimension
+   * intrinsic -
+   */
+
+  /* i64 llvm.visc.getNumNodeInstances.[xyz](i8*);
+   */
+  def int_visc_getNumNodeInstances_x : Intrinsic<[llvm_i64_ty], [llvm_ptr_ty],
+                                                 [IntrNoMem]>;
+
+  def int_visc_getNumNodeInstances_y : Intrinsic<[llvm_i64_ty], [llvm_ptr_ty],
+                                                 [IntrNoMem]>;
+
+  def int_visc_getNumNodeInstances_z : Intrinsic<[llvm_i64_ty], [llvm_ptr_ty],
+                                                 [IntrNoMem]>;
+
+  /* Local Barrier
+   * void llvm.visc.barrier();
+   */
+  def int_visc_barrier : Intrinsic<[], [], []>;
+
+  /* Memory allocation inside the graph
+   * i8* llvm.visc.malloc(i64);
+   */
+  def int_visc_malloc : Intrinsic<[llvm_ptr_ty], [llvm_i64_ty], []>;
+
+  /* Find the vector length supported by the target architecture
+   * intrinsic -
+   * i32 llvm.visc.getVectorLength();
+   */
+  def int_visc_getVectorLength : Intrinsic<[llvm_i32_ty], [], []>;
+
+  /* ============ Atomic intrinsics ============= */
+  // Atomic arithmetic operations
+  
+  /* i32 llvm.visc.atomic.cmpxchg(i32*, i32)*/
+  def int_visc_atomic_cmpxchg: Intrinsic<[llvm_i32_ty], [llvm_ptr_ty,
+                                          llvm_i32_ty], []>;
+
+  /* i32 llvm.visc.atomic.add(i32*, i32)*/
+  def int_visc_atomic_add: Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
+                                    []>;
+
+  /* i32 llvm.visc.atomic.sub(i32*, i32)*/
+  def int_visc_atomic_sub: Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
+                                    []>;
+
+  /* i32 llvm.visc.atomic.xchg(i32*, i32)*/
+  def int_visc_atomic_xchg: Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
+                                    []>;
+
+  /* i32 llvm.visc.atomic.inc(i32*)*/
+  def int_visc_atomic_inc: Intrinsic<[llvm_i32_ty], [llvm_ptr_ty],
+                                    []>;
+
+  /* i32 llvm.visc.atomic.dec(i32*)*/
+  def int_visc_atomic_dec: Intrinsic<[llvm_i32_ty], [llvm_ptr_ty],
+                                    []>;
+
+  /* i32 llvm.visc.atomic.min(i32*, i32)*/
+  def int_visc_atomic_min: Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
+                                    []>;
+
+  /* i32 llvm.visc.atomic.umin(i32*, i32)*/
+  def int_visc_atomic_umin: Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
+                                    []>;
+
+  /* i32 llvm.visc.atomic.max(i32*, i32)*/
+  def int_visc_atomic_max: Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
+                                    []>;
+
+  /* i32 llvm.visc.atomic.umax(i32*, i32)*/
+  def int_visc_atomic_umax: Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
+                                    []>;
+
+  // Atomic bitwise operations
+
+  /* i32 llvm.visc.atomic.and(i32*, i32)*/
+  def int_visc_atomic_and: Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
+                                    []>;
+
+  /* i32 llvm.visc.atomic.or(i32*, i32)*/
+  def int_visc_atomic_or: Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
+                                    []>;
+
+  /* i32 llvm.visc.atomic.xor(i32*, i32)*/
+  def int_visc_atomic_xor: Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
+                                    []>;
+  /***************************************************************************/
+  /*                            ApproxHPVM intrinsics                        */
+  /***************************************************************************/
+
+  /* Tensor add intrinsic
+   * i8* llvm.visc.tensor.add(i8*, i8*);
+   */
+  def int_visc_tensor_add : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty,
+                                   llvm_ptr_ty], []>;
+
+  /* Tensor mul intrinsic
+   * i8* llvm.visc.tensor.mul(i8*, i8*);
+   */
+  def int_visc_tensor_mul : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty,
+                                   llvm_ptr_ty], []>;
+
+  /* Tensor relu intrinsic
+   * i8* llvm.visc.tensor.relu(i8*);
+   */
+  def int_visc_tensor_relu : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], []>;
+
+  /* Tensor clipped relu intrinsic
+   * i8* llvm.visc.tensor.clipped.relu(i8*);
+   */
+  def int_visc_tensor_clipped_relu : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], []>;
+
+  /* Tensor tanh intrinsic
+   * i8* llvm.visc.tensor.tanh(i8*);
+   */
+  def int_visc_tensor_tanh : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], []>;
+
+  /* Tensor sigmoid intrinsic
+   * i8* llvm.visc.tensor.sigmoid(i8*);
+   */
+  def int_visc_tensor_sigmoid : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], []>;
+
+  /* Tensor softmax intrinsic
+   * i8* llvm.visc.tensor.softmax(i8*);
+   */
+  def int_visc_tensor_softmax : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty], []>;
+
+  /* Tensor convolution intrinsic
+   * i8* llvm.visc.tensor.convolution(i8*, i8*, i32, i32, i32, i32);
+   */
+  def int_visc_tensor_convolution : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty,
+                                                              llvm_ptr_ty,
+                                                              llvm_i32_ty,
+                                                              llvm_i32_ty,
+                                                              llvm_i32_ty,
+                                                              llvm_i32_ty], []>;
+
+  /* Tensor group convolution intrinsic
+   * i8* llvm.visc.tensor.group.convolution(i8*, i8*, i32, i32, i32, i32, i32, i32);
+   */
+  def int_visc_tensor_group_convolution : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty,
+                                                              llvm_ptr_ty,
+                                                              llvm_i32_ty,
+                                                              llvm_i32_ty,
+                                                              llvm_i32_ty,
+                                                              llvm_i32_ty,
+                                                              llvm_i32_ty,
+                                                              llvm_i32_ty], []>;
+
+  /* Tensor BatchNorm intrinsic
+   * i8* llvm.visc.tensor.batchnorm(i8*, i8*, i8*, i8*, i8*, double);
+   */
+  def int_visc_tensor_batchnorm : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty,
+                                                            llvm_ptr_ty,
+                                                            llvm_ptr_ty,
+                                                            llvm_ptr_ty,
+                                                            llvm_ptr_ty,
+                                                            llvm_double_ty], []>;
+
+
+  /* Tensor pool intrinsics: max, min, average
+   * i8* llvm.visc.tensor.pool.max(i8*, i32, i32, i32, i32, i32, i32);
+   * i8* llvm.visc.tensor.pool.min(i8*, i32, i32, i32, i32, i32, i32);
+   * i8* llvm.visc.tensor.pool.average(i8*, i32, i32, i32, i32, i32, i32);
+   */
+  def int_visc_tensor_pool_max : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty,
+                                                           llvm_i32_ty,
+                                                           llvm_i32_ty,
+                                                           llvm_i32_ty,
+                                                           llvm_i32_ty,
+                                                           llvm_i32_ty,
+                                                           llvm_i32_ty], []>;
+  def int_visc_tensor_pool_min : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty,
+                                                           llvm_i32_ty,
+                                                           llvm_i32_ty,
+                                                           llvm_i32_ty,
+                                                           llvm_i32_ty,
+                                                           llvm_i32_ty,
+                                                           llvm_i32_ty], []>;
+  def int_visc_tensor_pool_mean : Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty,
+                                                            llvm_i32_ty,
+                                                            llvm_i32_ty,
+                                                            llvm_i32_ty,
+                                                            llvm_i32_ty,
+                                                            llvm_i32_ty,
+                                                            llvm_i32_ty], []>;
+
+}
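For reference only (not part of the patched file): a minimal sketch of how a front end or pass might emit one of the llvm.visc.* intrinsics declared above, assuming the intrinsic enum is regenerated from this file so that Intrinsic::visc_createNode1D exists. The helper name emitCreateNode1D is illustrative.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"

using namespace llvm;

// Build "%graph = call i8* @llvm.visc.createNode1D(i8* %fn, i64 %n)",
// matching the signature documented above for int_visc_createNode1D.
static CallInst *emitCreateNode1D(Module &M, IRBuilder<> &Builder,
                                  Function *NodeFn, Value *NumInstances) {
  Function *CreateNode1D =
      Intrinsic::getDeclaration(&M, Intrinsic::visc_createNode1D);
  Value *FnPtr = Builder.CreateBitCast(NodeFn, Builder.getInt8PtrTy());
  return Builder.CreateCall(CreateNode1D, {FnPtr, NumInstances});
}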
diff --git a/llvm/tools/hpvm/llvm_patches/include/IR/IntrinsicsVISC.td.patch b/llvm/tools/hpvm/llvm_patches/include/IR/IntrinsicsVISC.td.patch
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/llvm/tools/hpvm/llvm_patches/include/Support/Debug.h b/llvm/tools/hpvm/llvm_patches/include/Support/Debug.h
new file mode 100644
index 0000000000..3465c40336
--- /dev/null
+++ b/llvm/tools/hpvm/llvm_patches/include/Support/Debug.h
@@ -0,0 +1,104 @@
+//===- llvm/Support/Debug.h - Easy way to add debug output ------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a handy way of adding debugging information to your
+// code, without it being enabled all of the time, and without having to add
+// command line options to enable it.
+//
+// In particular, just wrap your code with the DEBUG() macro, and it will be
+// enabled automatically if you specify '-debug' on the command-line.
+// DEBUG() requires the DEBUG_TYPE macro to be defined. Set it to "foo" to
+// specify that your debug code belongs to class "foo". Be careful that you
+// only do this after including Debug.h and not around any #include of
+// headers. Headers should define and undef the macro around the code that
+// needs to use the DEBUG() macro. Then, on the command line, you can specify
+// '-debug-only=foo' to enable JUST the debug information for the foo class.
+//
+// When compiling without assertions, the -debug-* options and all code in
+// DEBUG() statements disappears, so it does not affect the runtime of the code.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_SUPPORT_DEBUG_H
+#define LLVM_SUPPORT_DEBUG_H
+
+namespace llvm {
+
+class raw_ostream;
+
+#ifndef NDEBUG
+/// DebugFlag - This boolean is set to true if the '-debug' command line option
+/// is specified.  This should probably not be referenced directly, instead, use
+/// the DEBUG macro below.
+///
+extern bool DebugFlag;
+
+/// isCurrentDebugType - Return true if the specified string is the debug type
+/// specified on the command line, or if none was specified on the command line
+/// with the -debug-only=X option.
+///
+bool isCurrentDebugType(const char *Type);
+
+/// setCurrentDebugType - Set the current debug type, as if the -debug-only=X
+/// option were specified.  Note that DebugFlag also needs to be set to true for
+/// debug output to be produced.
+///
+void setCurrentDebugType(const char *Type);
+
+/// setCurrentDebugTypes - Set the current debug type, as if the
+/// -debug-only=X,Y,Z option were specified. Note that DebugFlag
+/// also needs to be set to true for debug output to be produced.
+///
+void setCurrentDebugTypes(const char **Types, unsigned Count);
+
+/// DEBUG_WITH_TYPE macro - This macro should be used by passes to emit debug
+/// information.  If the '-debug' option is specified on the commandline, and if
+/// this is a debug build, then the code specified as the option to the macro
+/// will be executed.  Otherwise it will not be.  Example:
+///
+/// DEBUG_WITH_TYPE("bitset", dbgs() << "Bitset contains: " << Bitset << "\n");
+///
+/// This will emit the debug information if -debug is present, and -debug-only
+/// is not specified, or is specified as "bitset".
+#define DEBUG_WITH_TYPE(TYPE, X)                                        \
+  do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType(TYPE)) { X; } \
+  } while (false)
+
+#else
+#define isCurrentDebugType(X) (false)
+#define setCurrentDebugType(X)
+#define setCurrentDebugTypes(X, N)
+#define DEBUG_WITH_TYPE(TYPE, X) do { } while (false)
+#endif
+
+/// EnableDebugBuffering - This defaults to false.  If true, the debug
+/// stream will install signal handlers to dump any buffered debug
+/// output.  It allows clients to selectively allow the debug stream
+/// to install signal handlers if they are certain there will be no
+/// conflict.
+///
+extern bool EnableDebugBuffering;
+
+/// dbgs() - This returns a reference to a raw_ostream for debugging
+/// messages.  If debugging is disabled it returns errs().  Use it
+/// like: dbgs() << "foo" << "bar";
+raw_ostream &dbgs();
+
+// DEBUG macro - This macro should be used by passes to emit debug information.
+// If the '-debug' option is specified on the commandline, and if this is a
+// debug build, then the code specified as the option to the macro will be
+// executed.  Otherwise it will not be.  Example:
+//
+// DEBUG(dbgs() << "Bitset contains: " << Bitset << "\n");
+//
+#define DEBUG(X) DEBUG_WITH_TYPE(DEBUG_TYPE, X)
+
+} // end namespace llvm
+
+#endif // LLVM_SUPPORT_DEBUG_H
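As a usage reminder (not part of the patched header), a pass built against this Debug.h would typically look like the sketch below; the debug type "hpvm-example" and the helper reportVisit are placeholders chosen for illustration.

#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "hpvm-example"

// Prints only in an assertions-enabled build run with -debug or
// -debug-only=hpvm-example; otherwise the statement compiles away entirely.
static void reportVisit(unsigned NodeCount) {
  DEBUG(llvm::dbgs() << "visited " << NodeCount << " dataflow nodes\n");
}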
diff --git a/llvm/tools/hpvm/llvm_patches/include/Support/Debug.h.patch b/llvm/tools/hpvm/llvm_patches/include/Support/Debug.h.patch
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/llvm/tools/hpvm/llvm_patches/lib/AsmParser/LLLexer.cpp b/llvm/tools/hpvm/llvm_patches/lib/AsmParser/LLLexer.cpp
new file mode 100644
index 0000000000..0d64ef41cc
--- /dev/null
+++ b/llvm/tools/hpvm/llvm_patches/lib/AsmParser/LLLexer.cpp
@@ -0,0 +1,1020 @@
+//===- LLLexer.cpp - Lexer for .ll Files ----------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implement the Lexer for .ll files.
+//
+//===----------------------------------------------------------------------===//
+
+#include "LLLexer.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/SourceMgr.h"
+#include <cassert>
+#include <cctype>
+#include <cstdio>
+
+using namespace llvm;
+
+bool LLLexer::Error(LocTy ErrorLoc, const Twine &Msg) const {
+  ErrorInfo = SM.GetMessage(ErrorLoc, SourceMgr::DK_Error, Msg);
+  return true;
+}
+
+void LLLexer::Warning(LocTy WarningLoc, const Twine &Msg) const {
+  SM.PrintMessage(WarningLoc, SourceMgr::DK_Warning, Msg);
+}
+
+//===----------------------------------------------------------------------===//
+// Helper functions.
+//===----------------------------------------------------------------------===//
+
+// atoull - Convert an ascii string of decimal digits into the unsigned long
+// long representation... this does not have to do input error checking,
+// because we know that the input will be matched by a suitable regex...
+//
+uint64_t LLLexer::atoull(const char *Buffer, const char *End) {
+  uint64_t Result = 0;
+  for (; Buffer != End; Buffer++) {
+    uint64_t OldRes = Result;
+    Result *= 10;
+    Result += *Buffer-'0';
+    if (Result < OldRes) {  // Uh, oh, overflow detected!!!
+      Error("constant bigger than 64 bits detected!");
+      return 0;
+    }
+  }
+  return Result;
+}
+
+uint64_t LLLexer::HexIntToVal(const char *Buffer, const char *End) {
+  uint64_t Result = 0;
+  for (; Buffer != End; ++Buffer) {
+    uint64_t OldRes = Result;
+    Result *= 16;
+    Result += hexDigitValue(*Buffer);
+
+    if (Result < OldRes) {   // Uh, oh, overflow detected!!!
+      Error("constant bigger than 64 bits detected!");
+      return 0;
+    }
+  }
+  return Result;
+}
+
+void LLLexer::HexToIntPair(const char *Buffer, const char *End,
+                           uint64_t Pair[2]) {
+  Pair[0] = 0;
+  if (End - Buffer >= 16) {
+    for (int i = 0; i < 16; i++, Buffer++) {
+      assert(Buffer != End);
+      Pair[0] *= 16;
+      Pair[0] += hexDigitValue(*Buffer);
+    }
+  }
+  Pair[1] = 0;
+  for (int i = 0; i < 16 && Buffer != End; i++, Buffer++) {
+    Pair[1] *= 16;
+    Pair[1] += hexDigitValue(*Buffer);
+  }
+  if (Buffer != End)
+    Error("constant bigger than 128 bits detected!");
+}
+
+/// FP80HexToIntPair - translate an 80 bit FP80 number (20 hexits) into
+/// { low64, high16 } as usual for an APInt.
+void LLLexer::FP80HexToIntPair(const char *Buffer, const char *End,
+                           uint64_t Pair[2]) {
+  Pair[1] = 0;
+  for (int i=0; i<4 && Buffer != End; i++, Buffer++) {
+    assert(Buffer != End);
+    Pair[1] *= 16;
+    Pair[1] += hexDigitValue(*Buffer);
+  }
+  Pair[0] = 0;
+  for (int i = 0; i < 16 && Buffer != End; i++, Buffer++) {
+    Pair[0] *= 16;
+    Pair[0] += hexDigitValue(*Buffer);
+  }
+  if (Buffer != End)
+    Error("constant bigger than 128 bits detected!");
+}
+
+// UnEscapeLexed - Run through the specified buffer and change \xx codes to the
+// appropriate character.
+static void UnEscapeLexed(std::string &Str) {
+  if (Str.empty()) return;
+
+  char *Buffer = &Str[0], *EndBuffer = Buffer+Str.size();
+  char *BOut = Buffer;
+  for (char *BIn = Buffer; BIn != EndBuffer; ) {
+    if (BIn[0] == '\\') {
+      if (BIn < EndBuffer-1 && BIn[1] == '\\') {
+        *BOut++ = '\\'; // Two \ becomes one
+        BIn += 2;
+      } else if (BIn < EndBuffer-2 &&
+                 isxdigit(static_cast<unsigned char>(BIn[1])) &&
+                 isxdigit(static_cast<unsigned char>(BIn[2]))) {
+        *BOut = hexDigitValue(BIn[1]) * 16 + hexDigitValue(BIn[2]);
+        BIn += 3;                           // Skip over handled chars
+        ++BOut;
+      } else {
+        *BOut++ = *BIn++;
+      }
+    } else {
+      *BOut++ = *BIn++;
+    }
+  }
+  Str.resize(BOut-Buffer);
+}
+
+/// isLabelChar - Return true for [-a-zA-Z$._0-9].
+static bool isLabelChar(char C) {
+  return isalnum(static_cast<unsigned char>(C)) || C == '-' || C == '$' ||
+         C == '.' || C == '_';
+}
+
+/// isLabelTail - Return true if this pointer points to a valid end of a label.
+static const char *isLabelTail(const char *CurPtr) {
+  while (true) {
+    if (CurPtr[0] == ':') return CurPtr+1;
+    if (!isLabelChar(CurPtr[0])) return nullptr;
+    ++CurPtr;
+  }
+}
+
+//===----------------------------------------------------------------------===//
+// Lexer definition.
+//===----------------------------------------------------------------------===//
+
+LLLexer::LLLexer(StringRef StartBuf, SourceMgr &sm, SMDiagnostic &Err,
+                 LLVMContext &C)
+  : CurBuf(StartBuf), ErrorInfo(Err), SM(sm), Context(C), APFloatVal(0.0) {
+  CurPtr = CurBuf.begin();
+}
+
+int LLLexer::getNextChar() {
+  char CurChar = *CurPtr++;
+  switch (CurChar) {
+  default: return (unsigned char)CurChar;
+  case 0:
+    // A nul character in the stream is either the end of the current buffer or
+    // a random nul in the file.  Disambiguate that here.
+    if (CurPtr-1 != CurBuf.end())
+      return 0;  // Just whitespace.
+
+    // Otherwise, return end of file.
+    --CurPtr;  // Another call to lex will return EOF again.
+    return EOF;
+  }
+}
+
+lltok::Kind LLLexer::LexToken() {
+  while (true) {
+    TokStart = CurPtr;
+
+    int CurChar = getNextChar();
+    switch (CurChar) {
+    default:
+      // Handle letters: [a-zA-Z_]
+      if (isalpha(static_cast<unsigned char>(CurChar)) || CurChar == '_')
+        return LexIdentifier();
+
+      return lltok::Error;
+    case EOF: return lltok::Eof;
+    case 0:
+    case ' ':
+    case '\t':
+    case '\n':
+    case '\r':
+      // Ignore whitespace.
+      continue;
+    case '+': return LexPositive();
+    case '@': return LexAt();
+    case '$': return LexDollar();
+    case '%': return LexPercent();
+    case '"': return LexQuote();
+    case '.':
+      if (const char *Ptr = isLabelTail(CurPtr)) {
+        CurPtr = Ptr;
+        StrVal.assign(TokStart, CurPtr-1);
+        return lltok::LabelStr;
+      }
+      if (CurPtr[0] == '.' && CurPtr[1] == '.') {
+        CurPtr += 2;
+        return lltok::dotdotdot;
+      }
+      return lltok::Error;
+    case ';':
+      SkipLineComment();
+      continue;
+    case '!': return LexExclaim();
+    case '#': return LexHash();
+    case '0': case '1': case '2': case '3': case '4':
+    case '5': case '6': case '7': case '8': case '9':
+    case '-':
+      return LexDigitOrNegative();
+    case '=': return lltok::equal;
+    case '[': return lltok::lsquare;
+    case ']': return lltok::rsquare;
+    case '{': return lltok::lbrace;
+    case '}': return lltok::rbrace;
+    case '<': return lltok::less;
+    case '>': return lltok::greater;
+    case '(': return lltok::lparen;
+    case ')': return lltok::rparen;
+    case ',': return lltok::comma;
+    case '*': return lltok::star;
+    case '|': return lltok::bar;
+    }
+  }
+}
+
+void LLLexer::SkipLineComment() {
+  while (true) {
+    if (CurPtr[0] == '\n' || CurPtr[0] == '\r' || getNextChar() == EOF)
+      return;
+  }
+}
+
+/// Lex all tokens that start with an @ character.
+///   GlobalVar   @\"[^\"]*\"
+///   GlobalVar   @[-a-zA-Z$._][-a-zA-Z$._0-9]*
+///   GlobalVarID @[0-9]+
+lltok::Kind LLLexer::LexAt() {
+  return LexVar(lltok::GlobalVar, lltok::GlobalID);
+}
+
+lltok::Kind LLLexer::LexDollar() {
+  if (const char *Ptr = isLabelTail(TokStart)) {
+    CurPtr = Ptr;
+    StrVal.assign(TokStart, CurPtr - 1);
+    return lltok::LabelStr;
+  }
+
+  // Handle DollarStringConstant: $\"[^\"]*\"
+  if (CurPtr[0] == '"') {
+    ++CurPtr;
+
+    while (true) {
+      int CurChar = getNextChar();
+
+      if (CurChar == EOF) {
+        Error("end of file in COMDAT variable name");
+        return lltok::Error;
+      }
+      if (CurChar == '"') {
+        StrVal.assign(TokStart + 2, CurPtr - 1);
+        UnEscapeLexed(StrVal);
+        if (StringRef(StrVal).find_first_of(0) != StringRef::npos) {
+          Error("Null bytes are not allowed in names");
+          return lltok::Error;
+        }
+        return lltok::ComdatVar;
+      }
+    }
+  }
+
+  // Handle ComdatVarName: $[-a-zA-Z$._][-a-zA-Z$._0-9]*
+  if (ReadVarName())
+    return lltok::ComdatVar;
+
+  return lltok::Error;
+}
+
+/// ReadString - Read a string until the closing quote.
+lltok::Kind LLLexer::ReadString(lltok::Kind kind) {
+  const char *Start = CurPtr;
+  while (true) {
+    int CurChar = getNextChar();
+
+    if (CurChar == EOF) {
+      Error("end of file in string constant");
+      return lltok::Error;
+    }
+    if (CurChar == '"') {
+      StrVal.assign(Start, CurPtr-1);
+      UnEscapeLexed(StrVal);
+      return kind;
+    }
+  }
+}
+
+/// ReadVarName - Read the rest of a token containing a variable name.
+bool LLLexer::ReadVarName() {
+  const char *NameStart = CurPtr;
+  if (isalpha(static_cast<unsigned char>(CurPtr[0])) ||
+      CurPtr[0] == '-' || CurPtr[0] == '$' ||
+      CurPtr[0] == '.' || CurPtr[0] == '_') {
+    ++CurPtr;
+    while (isalnum(static_cast<unsigned char>(CurPtr[0])) ||
+           CurPtr[0] == '-' || CurPtr[0] == '$' ||
+           CurPtr[0] == '.' || CurPtr[0] == '_')
+      ++CurPtr;
+
+    StrVal.assign(NameStart, CurPtr);
+    return true;
+  }
+  return false;
+}
+
+lltok::Kind LLLexer::LexVar(lltok::Kind Var, lltok::Kind VarID) {
+  // Handle StringConstant: \"[^\"]*\"
+  if (CurPtr[0] == '"') {
+    ++CurPtr;
+
+    while (true) {
+      int CurChar = getNextChar();
+
+      if (CurChar == EOF) {
+        Error("end of file in global variable name");
+        return lltok::Error;
+      }
+      if (CurChar == '"') {
+        StrVal.assign(TokStart+2, CurPtr-1);
+        UnEscapeLexed(StrVal);
+        if (StringRef(StrVal).find_first_of(0) != StringRef::npos) {
+          Error("Null bytes are not allowed in names");
+          return lltok::Error;
+        }
+        return Var;
+      }
+    }
+  }
+
+  // Handle VarName: [-a-zA-Z$._][-a-zA-Z$._0-9]*
+  if (ReadVarName())
+    return Var;
+
+  // Handle VarID: [0-9]+
+  if (isdigit(static_cast<unsigned char>(CurPtr[0]))) {
+    for (++CurPtr; isdigit(static_cast<unsigned char>(CurPtr[0])); ++CurPtr)
+      /*empty*/;
+
+    uint64_t Val = atoull(TokStart+1, CurPtr);
+    if ((unsigned)Val != Val)
+      Error("invalid value number (too large)!");
+    UIntVal = unsigned(Val);
+    return VarID;
+  }
+  return lltok::Error;
+}
+
+/// Lex all tokens that start with a % character.
+///   LocalVar   ::= %\"[^\"]*\"
+///   LocalVar   ::= %[-a-zA-Z$._][-a-zA-Z$._0-9]*
+///   LocalVarID ::= %[0-9]+
+lltok::Kind LLLexer::LexPercent() {
+  return LexVar(lltok::LocalVar, lltok::LocalVarID);
+}
+
+/// Lex all tokens that start with a " character.
+///   QuoteLabel        "[^"]+":
+///   StringConstant    "[^"]*"
+lltok::Kind LLLexer::LexQuote() {
+  lltok::Kind kind = ReadString(lltok::StringConstant);
+  if (kind == lltok::Error || kind == lltok::Eof)
+    return kind;
+
+  if (CurPtr[0] == ':') {
+    ++CurPtr;
+    if (StringRef(StrVal).find_first_of(0) != StringRef::npos) {
+      Error("Null bytes are not allowed in names");
+      kind = lltok::Error;
+    } else {
+      kind = lltok::LabelStr;
+    }
+  }
+
+  return kind;
+}
+
+/// Lex all tokens that start with a ! character.
+///    !foo
+///    !
+lltok::Kind LLLexer::LexExclaim() {
+  // Lex a metadata name as a MetadataVar.
+  if (isalpha(static_cast<unsigned char>(CurPtr[0])) ||
+      CurPtr[0] == '-' || CurPtr[0] == '$' ||
+      CurPtr[0] == '.' || CurPtr[0] == '_' || CurPtr[0] == '\\') {
+    ++CurPtr;
+    while (isalnum(static_cast<unsigned char>(CurPtr[0])) ||
+           CurPtr[0] == '-' || CurPtr[0] == '$' ||
+           CurPtr[0] == '.' || CurPtr[0] == '_' || CurPtr[0] == '\\')
+      ++CurPtr;
+
+    StrVal.assign(TokStart+1, CurPtr);   // Skip !
+    UnEscapeLexed(StrVal);
+    return lltok::MetadataVar;
+  }
+  return lltok::exclaim;
+}
+
+/// Lex all tokens that start with a # character.
+///    AttrGrpID ::= #[0-9]+
+lltok::Kind LLLexer::LexHash() {
+  // Handle AttrGrpID: #[0-9]+
+  if (isdigit(static_cast<unsigned char>(CurPtr[0]))) {
+    for (++CurPtr; isdigit(static_cast<unsigned char>(CurPtr[0])); ++CurPtr)
+      /*empty*/;
+
+    uint64_t Val = atoull(TokStart+1, CurPtr);
+    if ((unsigned)Val != Val)
+      Error("invalid value number (too large)!");
+    UIntVal = unsigned(Val);
+    return lltok::AttrGrpID;
+  }
+
+  return lltok::Error;
+}
+
+/// Lex a label, integer type, keyword, or hexadecimal integer constant.
+///    Label           [-a-zA-Z$._0-9]+:
+///    IntegerType     i[0-9]+
+///    Keyword         sdiv, float, ...
+///    HexIntConstant  [us]0x[0-9A-Fa-f]+
+lltok::Kind LLLexer::LexIdentifier() {
+  const char *StartChar = CurPtr;
+  const char *IntEnd = CurPtr[-1] == 'i' ? nullptr : StartChar;
+  const char *KeywordEnd = nullptr;
+
+  for (; isLabelChar(*CurPtr); ++CurPtr) {
+    // If we decide this is an integer, remember the end of the sequence.
+    if (!IntEnd && !isdigit(static_cast<unsigned char>(*CurPtr)))
+      IntEnd = CurPtr;
+    if (!KeywordEnd && !isalnum(static_cast<unsigned char>(*CurPtr)) &&
+        *CurPtr != '_')
+      KeywordEnd = CurPtr;
+  }
+
+  // If we stopped due to a colon, this really is a label.
+  if (*CurPtr == ':') {
+    StrVal.assign(StartChar-1, CurPtr++);
+    return lltok::LabelStr;
+  }
+
+  // Otherwise, this wasn't a label.  If this was valid as an integer type,
+  // return it.
+  if (!IntEnd) IntEnd = CurPtr;
+  if (IntEnd != StartChar) {
+    CurPtr = IntEnd;
+    uint64_t NumBits = atoull(StartChar, CurPtr);
+    if (NumBits < IntegerType::MIN_INT_BITS ||
+        NumBits > IntegerType::MAX_INT_BITS) {
+      Error("bitwidth for integer type out of range!");
+      return lltok::Error;
+    }
+    TyVal = IntegerType::get(Context, NumBits);
+    return lltok::Type;
+  }
+
+  // Otherwise, this was a letter sequence.  See which keyword this is.
+  if (!KeywordEnd) KeywordEnd = CurPtr;
+  CurPtr = KeywordEnd;
+  --StartChar;
+  StringRef Keyword(StartChar, CurPtr - StartChar);
+
+#define KEYWORD(STR)                                                           \
+  do {                                                                         \
+    if (Keyword == #STR)                                                       \
+      return lltok::kw_##STR;                                                  \
+  } while (false)
+
+  KEYWORD(true);    KEYWORD(false);
+  KEYWORD(declare); KEYWORD(define);
+  KEYWORD(global);  KEYWORD(constant);
+
+  KEYWORD(private);
+  KEYWORD(internal);
+  KEYWORD(available_externally);
+  KEYWORD(linkonce);
+  KEYWORD(linkonce_odr);
+  KEYWORD(weak); // Use as a linkage, and a modifier for "cmpxchg".
+  KEYWORD(weak_odr);
+  KEYWORD(appending);
+  KEYWORD(dllimport);
+  KEYWORD(dllexport);
+  KEYWORD(common);
+  KEYWORD(default);
+  KEYWORD(hidden);
+  KEYWORD(protected);
+  KEYWORD(unnamed_addr);
+  KEYWORD(local_unnamed_addr);
+  KEYWORD(externally_initialized);
+  KEYWORD(extern_weak);
+  KEYWORD(external);
+  KEYWORD(thread_local);
+  KEYWORD(localdynamic);
+  KEYWORD(initialexec);
+  KEYWORD(localexec);
+  KEYWORD(zeroinitializer);
+  KEYWORD(undef);
+  KEYWORD(null);
+  KEYWORD(none);
+  KEYWORD(to);
+  KEYWORD(caller);
+  KEYWORD(within);
+  KEYWORD(from);
+  KEYWORD(tail);
+  KEYWORD(musttail);
+  KEYWORD(notail);
+  KEYWORD(target);
+  KEYWORD(triple);
+  KEYWORD(source_filename);
+  KEYWORD(unwind);
+  KEYWORD(deplibs);             // FIXME: Remove in 4.0.
+  KEYWORD(datalayout);
+  KEYWORD(volatile);
+  KEYWORD(atomic);
+  KEYWORD(unordered);
+  KEYWORD(monotonic);
+  KEYWORD(acquire);
+  KEYWORD(release);
+  KEYWORD(acq_rel);
+  KEYWORD(seq_cst);
+  KEYWORD(singlethread);
+
+  KEYWORD(nnan);
+  KEYWORD(ninf);
+  KEYWORD(nsz);
+  KEYWORD(arcp);
+  KEYWORD(fast);
+  KEYWORD(nuw);
+  KEYWORD(nsw);
+  KEYWORD(exact);
+  KEYWORD(inbounds);
+  KEYWORD(inrange);
+  KEYWORD(align);
+  KEYWORD(addrspace);
+  KEYWORD(section);
+  KEYWORD(alias);
+  KEYWORD(ifunc);
+  KEYWORD(module);
+  KEYWORD(asm);
+  KEYWORD(sideeffect);
+  KEYWORD(alignstack);
+  KEYWORD(inteldialect);
+  KEYWORD(gc);
+  KEYWORD(prefix);
+  KEYWORD(prologue);
+
+  KEYWORD(ccc);
+  KEYWORD(fastcc);
+  KEYWORD(coldcc);
+  KEYWORD(x86_stdcallcc);
+  KEYWORD(x86_fastcallcc);
+  KEYWORD(x86_thiscallcc);
+  KEYWORD(x86_vectorcallcc);
+  KEYWORD(arm_apcscc);
+  KEYWORD(arm_aapcscc);
+  KEYWORD(arm_aapcs_vfpcc);
+  KEYWORD(msp430_intrcc);
+  KEYWORD(avr_intrcc);
+  KEYWORD(avr_signalcc);
+  KEYWORD(ptx_kernel);
+  KEYWORD(ptx_device);
+  KEYWORD(spir_kernel);
+  KEYWORD(spir_func);
+  KEYWORD(intel_ocl_bicc);
+  KEYWORD(x86_64_sysvcc);
+  KEYWORD(x86_64_win64cc);
+  KEYWORD(x86_regcallcc);
+  KEYWORD(webkit_jscc);
+  KEYWORD(swiftcc);
+  KEYWORD(anyregcc);
+  KEYWORD(preserve_mostcc);
+  KEYWORD(preserve_allcc);
+  KEYWORD(ghccc);
+  KEYWORD(x86_intrcc);
+  KEYWORD(hhvmcc);
+  KEYWORD(hhvm_ccc);
+  KEYWORD(cxx_fast_tlscc);
+  KEYWORD(amdgpu_vs);
+  KEYWORD(amdgpu_gs);
+  KEYWORD(amdgpu_ps);
+  KEYWORD(amdgpu_cs);
+  KEYWORD(amdgpu_kernel);
+
+  KEYWORD(cc);
+  KEYWORD(c);
+
+  KEYWORD(attributes);
+
+  KEYWORD(alwaysinline);
+  KEYWORD(allocsize);
+  KEYWORD(argmemonly);
+  KEYWORD(builtin);
+  KEYWORD(byval);
+  KEYWORD(inalloca);
+  KEYWORD(cold);
+  KEYWORD(convergent);
+  KEYWORD(dereferenceable);
+  KEYWORD(dereferenceable_or_null);
+  KEYWORD(inaccessiblememonly);
+  KEYWORD(inaccessiblemem_or_argmemonly);
+  KEYWORD(inlinehint);
+  KEYWORD(inreg);
+  KEYWORD(jumptable);
+  KEYWORD(minsize);
+  KEYWORD(naked);
+  KEYWORD(nest);
+  KEYWORD(noalias);
+  KEYWORD(nobuiltin);
+  KEYWORD(nocapture);
+  KEYWORD(noduplicate);
+  KEYWORD(noimplicitfloat);
+  KEYWORD(noinline);
+  KEYWORD(norecurse);
+  KEYWORD(nonlazybind);
+  KEYWORD(nonnull);
+  KEYWORD(noredzone);
+  KEYWORD(noreturn);
+  KEYWORD(nounwind);
+  KEYWORD(optnone);
+  KEYWORD(optsize);
+  KEYWORD(readnone);
+  KEYWORD(readonly);
+  KEYWORD(returned);
+  KEYWORD(returns_twice);
+  KEYWORD(signext);
+  KEYWORD(sret);
+  KEYWORD(ssp);
+  KEYWORD(sspreq);
+  KEYWORD(sspstrong);
+  KEYWORD(safestack);
+  KEYWORD(sanitize_address);
+  KEYWORD(sanitize_thread);
+  KEYWORD(sanitize_memory);
+  KEYWORD(swifterror);
+  KEYWORD(swiftself);
+  KEYWORD(uwtable);
+  KEYWORD(writeonly);
+  KEYWORD(zeroext);
+  // VISC parameter attributes
+  KEYWORD(in);
+  KEYWORD(out);
+  KEYWORD(inout);
+
+  KEYWORD(type);
+  KEYWORD(opaque);
+
+  KEYWORD(comdat);
+
+  // Comdat types
+  KEYWORD(any);
+  KEYWORD(exactmatch);
+  KEYWORD(largest);
+  KEYWORD(noduplicates);
+  KEYWORD(samesize);
+
+  KEYWORD(eq); KEYWORD(ne); KEYWORD(slt); KEYWORD(sgt); KEYWORD(sle);
+  KEYWORD(sge); KEYWORD(ult); KEYWORD(ugt); KEYWORD(ule); KEYWORD(uge);
+  KEYWORD(oeq); KEYWORD(one); KEYWORD(olt); KEYWORD(ogt); KEYWORD(ole);
+  KEYWORD(oge); KEYWORD(ord); KEYWORD(uno); KEYWORD(ueq); KEYWORD(une);
+
+  KEYWORD(xchg); KEYWORD(nand); KEYWORD(max); KEYWORD(min); KEYWORD(umax);
+  KEYWORD(umin);
+
+  KEYWORD(x);
+  KEYWORD(blockaddress);
+
+  // Metadata types.
+  KEYWORD(distinct);
+
+  // Use-list order directives.
+  KEYWORD(uselistorder);
+  KEYWORD(uselistorder_bb);
+
+  KEYWORD(personality);
+  KEYWORD(cleanup);
+  KEYWORD(catch);
+  KEYWORD(filter);
+
+#undef KEYWORD
+
+  // Keywords for types.
+#define TYPEKEYWORD(STR, LLVMTY)                                               \
+  do {                                                                         \
+    if (Keyword == STR) {                                                      \
+      TyVal = LLVMTY;                                                          \
+      return lltok::Type;                                                      \
+    }                                                                          \
+  } while (false)
+
+  TYPEKEYWORD("void",      Type::getVoidTy(Context));
+  TYPEKEYWORD("half",      Type::getHalfTy(Context));
+  TYPEKEYWORD("float",     Type::getFloatTy(Context));
+  TYPEKEYWORD("double",    Type::getDoubleTy(Context));
+  TYPEKEYWORD("x86_fp80",  Type::getX86_FP80Ty(Context));
+  TYPEKEYWORD("fp128",     Type::getFP128Ty(Context));
+  TYPEKEYWORD("ppc_fp128", Type::getPPC_FP128Ty(Context));
+  TYPEKEYWORD("label",     Type::getLabelTy(Context));
+  TYPEKEYWORD("metadata",  Type::getMetadataTy(Context));
+  TYPEKEYWORD("x86_mmx",   Type::getX86_MMXTy(Context));
+  TYPEKEYWORD("token",     Type::getTokenTy(Context));
+
+#undef TYPEKEYWORD
+
+  // Keywords for instructions.
+#define INSTKEYWORD(STR, Enum)                                                 \
+  do {                                                                         \
+    if (Keyword == #STR) {                                                     \
+      UIntVal = Instruction::Enum;                                             \
+      return lltok::kw_##STR;                                                  \
+    }                                                                          \
+  } while (false)
+
+  INSTKEYWORD(add,   Add);  INSTKEYWORD(fadd,   FAdd);
+  INSTKEYWORD(sub,   Sub);  INSTKEYWORD(fsub,   FSub);
+  INSTKEYWORD(mul,   Mul);  INSTKEYWORD(fmul,   FMul);
+  INSTKEYWORD(udiv,  UDiv); INSTKEYWORD(sdiv,  SDiv); INSTKEYWORD(fdiv,  FDiv);
+  INSTKEYWORD(urem,  URem); INSTKEYWORD(srem,  SRem); INSTKEYWORD(frem,  FRem);
+  INSTKEYWORD(shl,   Shl);  INSTKEYWORD(lshr,  LShr); INSTKEYWORD(ashr,  AShr);
+  INSTKEYWORD(and,   And);  INSTKEYWORD(or,    Or);   INSTKEYWORD(xor,   Xor);
+  INSTKEYWORD(icmp,  ICmp); INSTKEYWORD(fcmp,  FCmp);
+
+  INSTKEYWORD(phi,         PHI);
+  INSTKEYWORD(call,        Call);
+  INSTKEYWORD(trunc,       Trunc);
+  INSTKEYWORD(zext,        ZExt);
+  INSTKEYWORD(sext,        SExt);
+  INSTKEYWORD(fptrunc,     FPTrunc);
+  INSTKEYWORD(fpext,       FPExt);
+  INSTKEYWORD(uitofp,      UIToFP);
+  INSTKEYWORD(sitofp,      SIToFP);
+  INSTKEYWORD(fptoui,      FPToUI);
+  INSTKEYWORD(fptosi,      FPToSI);
+  INSTKEYWORD(inttoptr,    IntToPtr);
+  INSTKEYWORD(ptrtoint,    PtrToInt);
+  INSTKEYWORD(bitcast,     BitCast);
+  INSTKEYWORD(addrspacecast, AddrSpaceCast);
+  INSTKEYWORD(select,      Select);
+  INSTKEYWORD(va_arg,      VAArg);
+  INSTKEYWORD(ret,         Ret);
+  INSTKEYWORD(br,          Br);
+  INSTKEYWORD(switch,      Switch);
+  INSTKEYWORD(indirectbr,  IndirectBr);
+  INSTKEYWORD(invoke,      Invoke);
+  INSTKEYWORD(resume,      Resume);
+  INSTKEYWORD(unreachable, Unreachable);
+
+  INSTKEYWORD(alloca,      Alloca);
+  INSTKEYWORD(load,        Load);
+  INSTKEYWORD(store,       Store);
+  INSTKEYWORD(cmpxchg,     AtomicCmpXchg);
+  INSTKEYWORD(atomicrmw,   AtomicRMW);
+  INSTKEYWORD(fence,       Fence);
+  INSTKEYWORD(getelementptr, GetElementPtr);
+
+  INSTKEYWORD(extractelement, ExtractElement);
+  INSTKEYWORD(insertelement,  InsertElement);
+  INSTKEYWORD(shufflevector,  ShuffleVector);
+  INSTKEYWORD(extractvalue,   ExtractValue);
+  INSTKEYWORD(insertvalue,    InsertValue);
+  INSTKEYWORD(landingpad,     LandingPad);
+  INSTKEYWORD(cleanupret,     CleanupRet);
+  INSTKEYWORD(catchret,       CatchRet);
+  INSTKEYWORD(catchswitch,  CatchSwitch);
+  INSTKEYWORD(catchpad,     CatchPad);
+  INSTKEYWORD(cleanuppad,   CleanupPad);
+
+#undef INSTKEYWORD
+
+#define DWKEYWORD(TYPE, TOKEN)                                                 \
+  do {                                                                         \
+    if (Keyword.startswith("DW_" #TYPE "_")) {                                 \
+      StrVal.assign(Keyword.begin(), Keyword.end());                           \
+      return lltok::TOKEN;                                                     \
+    }                                                                          \
+  } while (false)
+
+  DWKEYWORD(TAG, DwarfTag);
+  DWKEYWORD(ATE, DwarfAttEncoding);
+  DWKEYWORD(VIRTUALITY, DwarfVirtuality);
+  DWKEYWORD(LANG, DwarfLang);
+  DWKEYWORD(CC, DwarfCC);
+  DWKEYWORD(OP, DwarfOp);
+  DWKEYWORD(MACINFO, DwarfMacinfo);
+
+#undef DWKEYWORD
+
+  if (Keyword.startswith("DIFlag")) {
+    StrVal.assign(Keyword.begin(), Keyword.end());
+    return lltok::DIFlag;
+  }
+
+  if (Keyword.startswith("CSK_")) {
+    StrVal.assign(Keyword.begin(), Keyword.end());
+    return lltok::ChecksumKind;
+  }
+
+  if (Keyword == "NoDebug" || Keyword == "FullDebug" ||
+      Keyword == "LineTablesOnly") {
+    StrVal.assign(Keyword.begin(), Keyword.end());
+    return lltok::EmissionKind;
+  }
+
+  // Check for [us]0x[0-9A-Fa-f]+, which are hexadecimal constants generated
+  // by the CFE to avoid forcing it to deal with 64-bit numbers.
+  if ((TokStart[0] == 'u' || TokStart[0] == 's') &&
+      TokStart[1] == '0' && TokStart[2] == 'x' &&
+      isxdigit(static_cast<unsigned char>(TokStart[3]))) {
+    int len = CurPtr-TokStart-3;
+    uint32_t bits = len * 4;
+    StringRef HexStr(TokStart + 3, len);
+    if (!all_of(HexStr, isxdigit)) {
+      // Bad token, return it as an error.
+      CurPtr = TokStart+3;
+      return lltok::Error;
+    }
+    APInt Tmp(bits, HexStr, 16);
+    uint32_t activeBits = Tmp.getActiveBits();
+    if (activeBits > 0 && activeBits < bits)
+      Tmp = Tmp.trunc(activeBits);
+    APSIntVal = APSInt(Tmp, TokStart[0] == 'u');
+    return lltok::APSInt;
+  }
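+  // For illustration, the check above accepts tokens such as "u0xFF"
+  // (an unsigned 8-bit APSInt with value 255) and "s0xFFFF" (a signed
+  // 16-bit APSInt); the bit width is four bits per hex digit, truncated
+  // to the active bits when the leading digits are zero.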
+
+  // If this is "cc1234", return this as just "cc".
+  if (TokStart[0] == 'c' && TokStart[1] == 'c') {
+    CurPtr = TokStart+2;
+    return lltok::kw_cc;
+  }
+
+  // Finally, if this isn't known, return an error.
+  CurPtr = TokStart+1;
+  return lltok::Error;
+}
+
+/// Lex all tokens that start with a 0x prefix, knowing they match and are not
+/// labels.
+///    HexFPConstant     0x[0-9A-Fa-f]+
+///    HexFP80Constant   0xK[0-9A-Fa-f]+
+///    HexFP128Constant  0xL[0-9A-Fa-f]+
+///    HexPPC128Constant 0xM[0-9A-Fa-f]+
+///    HexHalfConstant   0xH[0-9A-Fa-f]+
+lltok::Kind LLLexer::Lex0x() {
+  CurPtr = TokStart + 2;
+
+  char Kind;
+  if ((CurPtr[0] >= 'K' && CurPtr[0] <= 'M') || CurPtr[0] == 'H') {
+    Kind = *CurPtr++;
+  } else {
+    Kind = 'J';
+  }
+
+  if (!isxdigit(static_cast<unsigned char>(CurPtr[0]))) {
+    // Bad token, return it as an error.
+    CurPtr = TokStart+1;
+    return lltok::Error;
+  }
+
+  while (isxdigit(static_cast<unsigned char>(CurPtr[0])))
+    ++CurPtr;
+
+  if (Kind == 'J') {
+    // HexFPConstant - Floating point constant represented in IEEE format as a
+    // hexadecimal number for when exponential notation is not precise enough.
+    // Half, Float, and double only.
+    APFloatVal = APFloat(APFloat::IEEEdouble(),
+                         APInt(64, HexIntToVal(TokStart + 2, CurPtr)));
+    return lltok::APFloat;
+  }
+
+  uint64_t Pair[2];
+  switch (Kind) {
+  default: llvm_unreachable("Unknown kind!");
+  case 'K':
+    // F80HexFPConstant - x87 long double in hexadecimal format (10 bytes)
+    FP80HexToIntPair(TokStart+3, CurPtr, Pair);
+    APFloatVal = APFloat(APFloat::x87DoubleExtended(), APInt(80, Pair));
+    return lltok::APFloat;
+  case 'L':
+    // F128HexFPConstant - IEEE 128-bit in hexadecimal format (16 bytes)
+    HexToIntPair(TokStart+3, CurPtr, Pair);
+    APFloatVal = APFloat(APFloat::IEEEquad(), APInt(128, Pair));
+    return lltok::APFloat;
+  case 'M':
+    // PPC128HexFPConstant - PowerPC 128-bit in hexadecimal format (16 bytes)
+    HexToIntPair(TokStart+3, CurPtr, Pair);
+    APFloatVal = APFloat(APFloat::PPCDoubleDouble(), APInt(128, Pair));
+    return lltok::APFloat;
+  case 'H':
+    APFloatVal = APFloat(APFloat::IEEEhalf(),
+                         APInt(16,HexIntToVal(TokStart+3, CurPtr)));
+    return lltok::APFloat;
+  }
+}
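+// Illustrative examples of the hexadecimal FP forms handled above:
+//   0x3FF0000000000000    double 1.0  (plain 0x form, 16 hex digits)
+//   0xH3C00               half 1.0    (0xH form, 4 hex digits)
+// The 0xK, 0xL and 0xM forms carry x86_fp80, fp128 and ppc_fp128 payloads of
+// 20, 32 and 32 hex digits respectively.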
+
+/// Lex tokens for a label or a numeric constant, possibly starting with -.
+///    Label             [-a-zA-Z$._0-9]+:
+///    NInteger          -[0-9]+
+///    FPConstant        [-+]?[0-9]+[.][0-9]*([eE][-+]?[0-9]+)?
+///    PInteger          [0-9]+
+///    HexFPConstant     0x[0-9A-Fa-f]+
+///    HexFP80Constant   0xK[0-9A-Fa-f]+
+///    HexFP128Constant  0xL[0-9A-Fa-f]+
+///    HexPPC128Constant 0xM[0-9A-Fa-f]+
+lltok::Kind LLLexer::LexDigitOrNegative() {
+  // If the letter after the negative is not a number, this is probably a label.
+  if (!isdigit(static_cast<unsigned char>(TokStart[0])) &&
+      !isdigit(static_cast<unsigned char>(CurPtr[0]))) {
+    // Okay, this is not a number after the -, it's probably a label.
+    if (const char *End = isLabelTail(CurPtr)) {
+      StrVal.assign(TokStart, End-1);
+      CurPtr = End;
+      return lltok::LabelStr;
+    }
+
+    return lltok::Error;
+  }
+
+  // At this point, it is either a label, int or fp constant.
+
+  // Skip digits, we have at least one.
+  for (; isdigit(static_cast<unsigned char>(CurPtr[0])); ++CurPtr)
+    /*empty*/;
+
+  // Check to see if this really is a label after all, e.g. "-1:".
+  if (isLabelChar(CurPtr[0]) || CurPtr[0] == ':') {
+    if (const char *End = isLabelTail(CurPtr)) {
+      StrVal.assign(TokStart, End-1);
+      CurPtr = End;
+      return lltok::LabelStr;
+    }
+  }
+
+  // If the next character is a '.', then this is a floating-point value;
+  // otherwise it is an integer.
+  if (CurPtr[0] != '.') {
+    if (TokStart[0] == '0' && TokStart[1] == 'x')
+      return Lex0x();
+    APSIntVal = APSInt(StringRef(TokStart, CurPtr - TokStart));
+    return lltok::APSInt;
+  }
+
+  ++CurPtr;
+
+  // Skip over [0-9]*([eE][-+]?[0-9]+)?
+  while (isdigit(static_cast<unsigned char>(CurPtr[0]))) ++CurPtr;
+
+  if (CurPtr[0] == 'e' || CurPtr[0] == 'E') {
+    if (isdigit(static_cast<unsigned char>(CurPtr[1])) ||
+        ((CurPtr[1] == '-' || CurPtr[1] == '+') &&
+          isdigit(static_cast<unsigned char>(CurPtr[2])))) {
+      CurPtr += 2;
+      while (isdigit(static_cast<unsigned char>(CurPtr[0]))) ++CurPtr;
+    }
+  }
+
+  APFloatVal = APFloat(APFloat::IEEEdouble(),
+                       StringRef(TokStart, CurPtr - TokStart));
+  return lltok::APFloat;
+}
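+// A few illustrative inputs for the routine above: "-42" and "123" lex to
+// lltok::APSInt, "-1:" lexes to lltok::LabelStr (without the trailing ':'),
+// "2.5e-3" lexes to lltok::APFloat, and 0x-prefixed forms are handed off to
+// Lex0x().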
+
+/// Lex a floating point constant starting with +.
+///    FPConstant  [-+]?[0-9]+[.][0-9]*([eE][-+]?[0-9]+)?
+lltok::Kind LLLexer::LexPositive() {
+  // If the character after the '+' is not a digit, this cannot be a
+  // floating-point constant.
+  if (!isdigit(static_cast<unsigned char>(CurPtr[0])))
+    return lltok::Error;
+
+  // Skip digits.
+  for (++CurPtr; isdigit(static_cast<unsigned char>(CurPtr[0])); ++CurPtr)
+    /*empty*/;
+
+  // At this point, we need a '.'.
+  if (CurPtr[0] != '.') {
+    CurPtr = TokStart+1;
+    return lltok::Error;
+  }
+
+  ++CurPtr;
+
+  // Skip over [0-9]*([eE][-+]?[0-9]+)?
+  while (isdigit(static_cast<unsigned char>(CurPtr[0]))) ++CurPtr;
+
+  if (CurPtr[0] == 'e' || CurPtr[0] == 'E') {
+    if (isdigit(static_cast<unsigned char>(CurPtr[1])) ||
+        ((CurPtr[1] == '-' || CurPtr[1] == '+') &&
+        isdigit(static_cast<unsigned char>(CurPtr[2])))) {
+      CurPtr += 2;
+      while (isdigit(static_cast<unsigned char>(CurPtr[0]))) ++CurPtr;
+    }
+  }
+
+  APFloatVal = APFloat(APFloat::IEEEdouble(),
+                       StringRef(TokStart, CurPtr - TokStart));
+  return lltok::APFloat;
+}
diff --git a/llvm/tools/hpvm/llvm_patches/lib/AsmParser/LLLexer.cpp.patch b/llvm/tools/hpvm/llvm_patches/lib/AsmParser/LLLexer.cpp.patch
new file mode 100644
index 0000000000..c0fbc644cb
--- /dev/null
+++ b/llvm/tools/hpvm/llvm_patches/lib/AsmParser/LLLexer.cpp.patch
@@ -0,0 +1,13 @@
+--- ../../../lib/AsmParser/LLLexer.cpp	2019-12-29 18:23:35.457918227 -0600
++++ lib/AsmParser/LLLexer.cpp	2019-12-29 18:44:36.376156576 -0600
+@@ -660,6 +660,10 @@
+   KEYWORD(uwtable);
+   KEYWORD(writeonly);
+   KEYWORD(zeroext);
++  // VISC parameter attributes
++  KEYWORD(in);
++  KEYWORD(out);
++  KEYWORD(inout);
+ 
+   KEYWORD(type);
+   KEYWORD(opaque);
diff --git a/llvm/tools/hpvm/llvm_patches/lib/AsmParser/LLLexer.h b/llvm/tools/hpvm/llvm_patches/lib/AsmParser/LLLexer.h
new file mode 100644
index 0000000000..90bf17d7a7
--- /dev/null
+++ b/llvm/tools/hpvm/llvm_patches/lib/AsmParser/LLLexer.h
@@ -0,0 +1,96 @@
+//===- LLLexer.h - Lexer for LLVM Assembly Files ----------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class represents the Lexer for .ll files.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_ASMPARSER_LLLEXER_H
+#define LLVM_LIB_ASMPARSER_LLLEXER_H
+
+#include "LLToken.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/APSInt.h"
+#include "llvm/Support/SourceMgr.h"
+#include <string>
+
+namespace llvm {
+  class MemoryBuffer;
+  class Type;
+  class SMDiagnostic;
+  class LLVMContext;
+
+  class LLLexer {
+    const char *CurPtr;
+    StringRef CurBuf;
+    SMDiagnostic &ErrorInfo;
+    SourceMgr &SM;
+    LLVMContext &Context;
+
+    // Information about the current token.
+    const char *TokStart;
+    lltok::Kind CurKind;
+    std::string StrVal;
+    unsigned UIntVal;
+    Type *TyVal;
+    APFloat APFloatVal;
+    APSInt  APSIntVal;
+
+  public:
+    explicit LLLexer(StringRef StartBuf, SourceMgr &SM, SMDiagnostic &,
+                     LLVMContext &C);
+
+    lltok::Kind Lex() {
+      return CurKind = LexToken();
+    }
+
+    typedef SMLoc LocTy;
+    LocTy getLoc() const { return SMLoc::getFromPointer(TokStart); }
+    lltok::Kind getKind() const { return CurKind; }
+    const std::string &getStrVal() const { return StrVal; }
+    Type *getTyVal() const { return TyVal; }
+    unsigned getUIntVal() const { return UIntVal; }
+    const APSInt &getAPSIntVal() const { return APSIntVal; }
+    const APFloat &getAPFloatVal() const { return APFloatVal; }
+
+
+    bool Error(LocTy L, const Twine &Msg) const;
+    bool Error(const Twine &Msg) const { return Error(getLoc(), Msg); }
+
+    void Warning(LocTy WarningLoc, const Twine &Msg) const;
+    void Warning(const Twine &Msg) const { return Warning(getLoc(), Msg); }
+
+  private:
+    lltok::Kind LexToken();
+
+    int getNextChar();
+    void SkipLineComment();
+    lltok::Kind ReadString(lltok::Kind kind);
+    bool ReadVarName();
+
+    lltok::Kind LexIdentifier();
+    lltok::Kind LexDigitOrNegative();
+    lltok::Kind LexPositive();
+    lltok::Kind LexAt();
+    lltok::Kind LexDollar();
+    lltok::Kind LexExclaim();
+    lltok::Kind LexPercent();
+    lltok::Kind LexVar(lltok::Kind Var, lltok::Kind VarID);
+    lltok::Kind LexQuote();
+    lltok::Kind Lex0x();
+    lltok::Kind LexHash();
+
+    uint64_t atoull(const char *Buffer, const char *End);
+    uint64_t HexIntToVal(const char *Buffer, const char *End);
+    void HexToIntPair(const char *Buffer, const char *End, uint64_t Pair[2]);
+    void FP80HexToIntPair(const char *Buff, const char *End, uint64_t Pair[2]);
+  };
+} // end namespace llvm
+
+#endif
diff --git a/llvm/tools/hpvm/llvm_patches/lib/AsmParser/LLLexer.h.patch b/llvm/tools/hpvm/llvm_patches/lib/AsmParser/LLLexer.h.patch
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/llvm/tools/hpvm/llvm_patches/lib/AsmParser/LLParser.cpp b/llvm/tools/hpvm/llvm_patches/lib/AsmParser/LLParser.cpp
new file mode 100644
index 0000000000..d7189c56ce
--- /dev/null
+++ b/llvm/tools/hpvm/llvm_patches/lib/AsmParser/LLParser.cpp
@@ -0,0 +1,6574 @@
+//===-- LLParser.cpp - Parser Class ---------------------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines the parser class for .ll files.
+//
+//===----------------------------------------------------------------------===//
+
+#include "LLParser.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/AsmParser/SlotMapping.h"
+#include "llvm/IR/Argument.h"
+#include "llvm/IR/AutoUpgrade.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/Comdat.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalIFunc.h"
+#include "llvm/IR/GlobalObject.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+#include "llvm/IR/ValueSymbolTable.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Dwarf.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/SaveAndRestore.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <cstring>
+#include <iterator>
+#include <vector>
+
+using namespace llvm;
+
+static std::string getTypeString(Type *T) {
+  std::string Result;
+  raw_string_ostream Tmp(Result);
+  Tmp << *T;
+  return Tmp.str();
+}
+
+/// Run: module ::= toplevelentity*
+bool LLParser::Run() {
+  // Prime the lexer.
+  Lex.Lex();
+
+  if (Context.shouldDiscardValueNames())
+    return Error(
+        Lex.getLoc(),
+        "Can't read textual IR with a Context that discards named Values");
+
+  return ParseTopLevelEntities() ||
+         ValidateEndOfModule();
+}
+
+bool LLParser::parseStandaloneConstantValue(Constant *&C,
+                                            const SlotMapping *Slots) {
+  restoreParsingState(Slots);
+  Lex.Lex();
+
+  Type *Ty = nullptr;
+  if (ParseType(Ty) || parseConstantValue(Ty, C))
+    return true;
+  if (Lex.getKind() != lltok::Eof)
+    return Error(Lex.getLoc(), "expected end of string");
+  return false;
+}
+
+bool LLParser::parseTypeAtBeginning(Type *&Ty, unsigned &Read,
+                                    const SlotMapping *Slots) {
+  restoreParsingState(Slots);
+  Lex.Lex();
+
+  Read = 0;
+  SMLoc Start = Lex.getLoc();
+  Ty = nullptr;
+  if (ParseType(Ty))
+    return true;
+  SMLoc End = Lex.getLoc();
+  Read = End.getPointer() - Start.getPointer();
+
+  return false;
+}
+
+void LLParser::restoreParsingState(const SlotMapping *Slots) {
+  if (!Slots)
+    return;
+  NumberedVals = Slots->GlobalValues;
+  NumberedMetadata = Slots->MetadataNodes;
+  for (const auto &I : Slots->NamedTypes)
+    NamedTypes.insert(
+        std::make_pair(I.getKey(), std::make_pair(I.second, LocTy())));
+  for (const auto &I : Slots->Types)
+    NumberedTypes.insert(
+        std::make_pair(I.first, std::make_pair(I.second, LocTy())));
+}
+
+/// ValidateEndOfModule - Do final validity and sanity checks at the end of the
+/// module.
+bool LLParser::ValidateEndOfModule() {
+  // Handle any function attribute group forward references.
+  for (const auto &RAG : ForwardRefAttrGroups) {
+    Value *V = RAG.first;
+    const std::vector<unsigned> &Attrs = RAG.second;
+    AttrBuilder B;
+
+    for (const auto &Attr : Attrs)
+      B.merge(NumberedAttrBuilders[Attr]);
+
+    if (Function *Fn = dyn_cast<Function>(V)) {
+      AttributeSet AS = Fn->getAttributes();
+      AttrBuilder FnAttrs(AS.getFnAttributes(), AttributeSet::FunctionIndex);
+      AS = AS.removeAttributes(Context, AttributeSet::FunctionIndex,
+                               AS.getFnAttributes());
+
+      FnAttrs.merge(B);
+
+      // If the alignment was parsed as an attribute, move to the alignment
+      // field.
+      if (FnAttrs.hasAlignmentAttr()) {
+        Fn->setAlignment(FnAttrs.getAlignment());
+        FnAttrs.removeAttribute(Attribute::Alignment);
+      }
+
+      AS = AS.addAttributes(Context, AttributeSet::FunctionIndex,
+                            AttributeSet::get(Context,
+                                              AttributeSet::FunctionIndex,
+                                              FnAttrs));
+      Fn->setAttributes(AS);
+    } else if (CallInst *CI = dyn_cast<CallInst>(V)) {
+      AttributeSet AS = CI->getAttributes();
+      AttrBuilder FnAttrs(AS.getFnAttributes(), AttributeSet::FunctionIndex);
+      AS = AS.removeAttributes(Context, AttributeSet::FunctionIndex,
+                               AS.getFnAttributes());
+      FnAttrs.merge(B);
+      AS = AS.addAttributes(Context, AttributeSet::FunctionIndex,
+                            AttributeSet::get(Context,
+                                              AttributeSet::FunctionIndex,
+                                              FnAttrs));
+      CI->setAttributes(AS);
+    } else if (InvokeInst *II = dyn_cast<InvokeInst>(V)) {
+      AttributeSet AS = II->getAttributes();
+      AttrBuilder FnAttrs(AS.getFnAttributes(), AttributeSet::FunctionIndex);
+      AS = AS.removeAttributes(Context, AttributeSet::FunctionIndex,
+                               AS.getFnAttributes());
+      FnAttrs.merge(B);
+      AS = AS.addAttributes(Context, AttributeSet::FunctionIndex,
+                            AttributeSet::get(Context,
+                                              AttributeSet::FunctionIndex,
+                                              FnAttrs));
+      II->setAttributes(AS);
+    } else {
+      llvm_unreachable("invalid object with forward attribute group reference");
+    }
+  }
+
+  // If there are entries in ForwardRefBlockAddresses at this point, the
+  // function was never defined.
+  if (!ForwardRefBlockAddresses.empty())
+    return Error(ForwardRefBlockAddresses.begin()->first.Loc,
+                 "expected function name in blockaddress");
+
+  for (const auto &NT : NumberedTypes)
+    if (NT.second.second.isValid())
+      return Error(NT.second.second,
+                   "use of undefined type '%" + Twine(NT.first) + "'");
+
+  for (StringMap<std::pair<Type*, LocTy> >::iterator I =
+       NamedTypes.begin(), E = NamedTypes.end(); I != E; ++I)
+    if (I->second.second.isValid())
+      return Error(I->second.second,
+                   "use of undefined type named '" + I->getKey() + "'");
+
+  if (!ForwardRefComdats.empty())
+    return Error(ForwardRefComdats.begin()->second,
+                 "use of undefined comdat '$" +
+                     ForwardRefComdats.begin()->first + "'");
+
+  if (!ForwardRefVals.empty())
+    return Error(ForwardRefVals.begin()->second.second,
+                 "use of undefined value '@" + ForwardRefVals.begin()->first +
+                 "'");
+
+  if (!ForwardRefValIDs.empty())
+    return Error(ForwardRefValIDs.begin()->second.second,
+                 "use of undefined value '@" +
+                 Twine(ForwardRefValIDs.begin()->first) + "'");
+
+  if (!ForwardRefMDNodes.empty())
+    return Error(ForwardRefMDNodes.begin()->second.second,
+                 "use of undefined metadata '!" +
+                 Twine(ForwardRefMDNodes.begin()->first) + "'");
+
+  // Resolve metadata cycles.
+  for (auto &N : NumberedMetadata) {
+    if (N.second && !N.second->isResolved())
+      N.second->resolveCycles();
+  }
+
+  for (auto *Inst : InstsWithTBAATag) {
+    MDNode *MD = Inst->getMetadata(LLVMContext::MD_tbaa);
+    assert(MD && "UpgradeInstWithTBAATag should have a TBAA tag");
+    auto *UpgradedMD = UpgradeTBAANode(*MD);
+    if (MD != UpgradedMD)
+      Inst->setMetadata(LLVMContext::MD_tbaa, UpgradedMD);
+  }
+
+  // Look for intrinsic functions and CallInsts that need to be upgraded.
+  for (Module::iterator FI = M->begin(), FE = M->end(); FI != FE; )
+    UpgradeCallsToIntrinsic(&*FI++); // must be post-increment, as we may
+                                     // remove the function
+
+  // Some types could be renamed during loading if several modules are
+  // loaded in the same LLVMContext (LTO scenario). In this case we should
+  // remangle intrinsic names as well.
+  for (Module::iterator FI = M->begin(), FE = M->end(); FI != FE; ) {
+    Function *F = &*FI++;
+    if (auto Remangled = Intrinsic::remangleIntrinsicFunction(F)) {
+      F->replaceAllUsesWith(Remangled.getValue());
+      F->eraseFromParent();
+    }
+  }
+
+  UpgradeDebugInfo(*M);
+
+  UpgradeModuleFlags(*M);
+
+  if (!Slots)
+    return false;
+  // Initialize the slot mapping.
+  // Because by this point we've parsed and validated everything, we can "steal"
+  // the mapping from LLParser as it doesn't need it anymore.
+  Slots->GlobalValues = std::move(NumberedVals);
+  Slots->MetadataNodes = std::move(NumberedMetadata);
+  for (const auto &I : NamedTypes)
+    Slots->NamedTypes.insert(std::make_pair(I.getKey(), I.second.first));
+  for (const auto &I : NumberedTypes)
+    Slots->Types.insert(std::make_pair(I.first, I.second.first));
+
+  return false;
+}
+
+//===----------------------------------------------------------------------===//
+// Top-Level Entities
+//===----------------------------------------------------------------------===//
+
+bool LLParser::ParseTopLevelEntities() {
+  while (true) {
+    switch (Lex.getKind()) {
+    default:         return TokError("expected top-level entity");
+    case lltok::Eof: return false;
+    case lltok::kw_declare: if (ParseDeclare()) return true; break;
+    case lltok::kw_define:  if (ParseDefine()) return true; break;
+    case lltok::kw_module:  if (ParseModuleAsm()) return true; break;
+    case lltok::kw_target:  if (ParseTargetDefinition()) return true; break;
+    case lltok::kw_source_filename:
+      if (ParseSourceFileName())
+        return true;
+      break;
+    case lltok::kw_deplibs: if (ParseDepLibs()) return true; break;
+    case lltok::LocalVarID: if (ParseUnnamedType()) return true; break;
+    case lltok::LocalVar:   if (ParseNamedType()) return true; break;
+    case lltok::GlobalID:   if (ParseUnnamedGlobal()) return true; break;
+    case lltok::GlobalVar:  if (ParseNamedGlobal()) return true; break;
+    case lltok::ComdatVar:  if (parseComdat()) return true; break;
+    case lltok::exclaim:    if (ParseStandaloneMetadata()) return true; break;
+    case lltok::MetadataVar:if (ParseNamedMetadata()) return true; break;
+    case lltok::kw_attributes: if (ParseUnnamedAttrGrp()) return true; break;
+    case lltok::kw_uselistorder: if (ParseUseListOrder()) return true; break;
+    case lltok::kw_uselistorder_bb:
+      if (ParseUseListOrderBB())
+        return true;
+      break;
+    }
+  }
+}
+
+/// toplevelentity
+///   ::= 'module' 'asm' STRINGCONSTANT
+bool LLParser::ParseModuleAsm() {
+  assert(Lex.getKind() == lltok::kw_module);
+  Lex.Lex();
+
+  std::string AsmStr;
+  if (ParseToken(lltok::kw_asm, "expected 'module asm'") ||
+      ParseStringConstant(AsmStr)) return true;
+
+  M->appendModuleInlineAsm(AsmStr);
+  return false;
+}
+
+/// toplevelentity
+///   ::= 'target' 'triple' '=' STRINGCONSTANT
+///   ::= 'target' 'datalayout' '=' STRINGCONSTANT
+bool LLParser::ParseTargetDefinition() {
+  assert(Lex.getKind() == lltok::kw_target);
+  std::string Str;
+  switch (Lex.Lex()) {
+  default: return TokError("unknown target property");
+  case lltok::kw_triple:
+    Lex.Lex();
+    if (ParseToken(lltok::equal, "expected '=' after target triple") ||
+        ParseStringConstant(Str))
+      return true;
+    M->setTargetTriple(Str);
+    return false;
+  case lltok::kw_datalayout:
+    Lex.Lex();
+    if (ParseToken(lltok::equal, "expected '=' after target datalayout") ||
+        ParseStringConstant(Str))
+      return true;
+    M->setDataLayout(Str);
+    return false;
+  }
+}
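+// For example, this routine accepts lines such as:
+//   target triple = "x86_64-unknown-linux-gnu"
+//   target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+// (the strings shown are common values, not requirements of the parser).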
+
+/// toplevelentity
+///   ::= 'source_filename' '=' STRINGCONSTANT
+bool LLParser::ParseSourceFileName() {
+  assert(Lex.getKind() == lltok::kw_source_filename);
+  std::string Str;
+  Lex.Lex();
+  if (ParseToken(lltok::equal, "expected '=' after source_filename") ||
+      ParseStringConstant(Str))
+    return true;
+  M->setSourceFileName(Str);
+  return false;
+}
+
+/// toplevelentity
+///   ::= 'deplibs' '=' '[' ']'
+///   ::= 'deplibs' '=' '[' STRINGCONSTANT (',' STRINGCONSTANT)* ']'
+/// FIXME: Remove in 4.0. Currently parse, but ignore.
+bool LLParser::ParseDepLibs() {
+  assert(Lex.getKind() == lltok::kw_deplibs);
+  Lex.Lex();
+  if (ParseToken(lltok::equal, "expected '=' after deplibs") ||
+      ParseToken(lltok::lsquare, "expected '=' after deplibs"))
+    return true;
+
+  if (EatIfPresent(lltok::rsquare))
+    return false;
+
+  do {
+    std::string Str;
+    if (ParseStringConstant(Str)) return true;
+  } while (EatIfPresent(lltok::comma));
+
+  return ParseToken(lltok::rsquare, "expected ']' at end of list");
+}
+
+/// ParseUnnamedType:
+///   ::= LocalVarID '=' 'type' type
+bool LLParser::ParseUnnamedType() {
+  LocTy TypeLoc = Lex.getLoc();
+  unsigned TypeID = Lex.getUIntVal();
+  Lex.Lex(); // eat LocalVarID;
+
+  if (ParseToken(lltok::equal, "expected '=' after name") ||
+      ParseToken(lltok::kw_type, "expected 'type' after '='"))
+    return true;
+
+  Type *Result = nullptr;
+  if (ParseStructDefinition(TypeLoc, "",
+                            NumberedTypes[TypeID], Result)) return true;
+
+  if (!isa<StructType>(Result)) {
+    std::pair<Type*, LocTy> &Entry = NumberedTypes[TypeID];
+    if (Entry.first)
+      return Error(TypeLoc, "non-struct types may not be recursive");
+    Entry.first = Result;
+    Entry.second = SMLoc();
+  }
+
+  return false;
+}
+
+/// toplevelentity
+///   ::= LocalVar '=' 'type' type
+bool LLParser::ParseNamedType() {
+  std::string Name = Lex.getStrVal();
+  LocTy NameLoc = Lex.getLoc();
+  Lex.Lex();  // eat LocalVar.
+
+  if (ParseToken(lltok::equal, "expected '=' after name") ||
+      ParseToken(lltok::kw_type, "expected 'type' after name"))
+    return true;
+
+  Type *Result = nullptr;
+  if (ParseStructDefinition(NameLoc, Name,
+                            NamedTypes[Name], Result)) return true;
+
+  if (!isa<StructType>(Result)) {
+    std::pair<Type*, LocTy> &Entry = NamedTypes[Name];
+    if (Entry.first)
+      return Error(NameLoc, "non-struct types may not be recursive");
+    Entry.first = Result;
+    Entry.second = SMLoc();
+  }
+
+  return false;
+}
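+// For example:
+//   %mytype = type { i32, i8* }   ; named struct type
+//   %intalias = type i32          ; non-struct alias; may not be recursive
+// The names %mytype and %intalias are illustrative only.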
+
+/// toplevelentity
+///   ::= 'declare' FunctionHeader
+bool LLParser::ParseDeclare() {
+  assert(Lex.getKind() == lltok::kw_declare);
+  Lex.Lex();
+
+  std::vector<std::pair<unsigned, MDNode *>> MDs;
+  while (Lex.getKind() == lltok::MetadataVar) {
+    unsigned MDK;
+    MDNode *N;
+    if (ParseMetadataAttachment(MDK, N))
+      return true;
+    MDs.push_back({MDK, N});
+  }
+
+  Function *F;
+  if (ParseFunctionHeader(F, false))
+    return true;
+  for (auto &MD : MDs)
+    F->addMetadata(MD.first, *MD.second);
+  return false;
+}
+
+/// toplevelentity
+///   ::= 'define' FunctionHeader (!dbg !56)* '{' ...
+bool LLParser::ParseDefine() {
+  assert(Lex.getKind() == lltok::kw_define);
+  Lex.Lex();
+
+  Function *F;
+  return ParseFunctionHeader(F, true) ||
+         ParseOptionalFunctionMetadata(*F) ||
+         ParseFunctionBody(*F);
+}
+
+/// ParseGlobalType
+///   ::= 'constant'
+///   ::= 'global'
+bool LLParser::ParseGlobalType(bool &IsConstant) {
+  if (Lex.getKind() == lltok::kw_constant)
+    IsConstant = true;
+  else if (Lex.getKind() == lltok::kw_global)
+    IsConstant = false;
+  else {
+    IsConstant = false;
+    return TokError("expected 'global' or 'constant'");
+  }
+  Lex.Lex();
+  return false;
+}
+
+bool LLParser::ParseOptionalUnnamedAddr(
+    GlobalVariable::UnnamedAddr &UnnamedAddr) {
+  if (EatIfPresent(lltok::kw_unnamed_addr))
+    UnnamedAddr = GlobalValue::UnnamedAddr::Global;
+  else if (EatIfPresent(lltok::kw_local_unnamed_addr))
+    UnnamedAddr = GlobalValue::UnnamedAddr::Local;
+  else
+    UnnamedAddr = GlobalValue::UnnamedAddr::None;
+  return false;
+}
+
+/// ParseUnnamedGlobal:
+///   OptionalVisibility (ALIAS | IFUNC) ...
+///   OptionalLinkage OptionalVisibility OptionalDLLStorageClass
+///                                                     ...   -> global variable
+///   GlobalID '=' OptionalVisibility (ALIAS | IFUNC) ...
+///   GlobalID '=' OptionalLinkage OptionalVisibility OptionalDLLStorageClass
+///                                                     ...   -> global variable
+bool LLParser::ParseUnnamedGlobal() {
+  unsigned VarID = NumberedVals.size();
+  std::string Name;
+  LocTy NameLoc = Lex.getLoc();
+
+  // Handle the GlobalID form.
+  if (Lex.getKind() == lltok::GlobalID) {
+    if (Lex.getUIntVal() != VarID)
+      return Error(Lex.getLoc(), "variable expected to be numbered '%" +
+                   Twine(VarID) + "'");
+    Lex.Lex(); // eat GlobalID;
+
+    if (ParseToken(lltok::equal, "expected '=' after name"))
+      return true;
+  }
+
+  bool HasLinkage;
+  unsigned Linkage, Visibility, DLLStorageClass;
+  GlobalVariable::ThreadLocalMode TLM;
+  GlobalVariable::UnnamedAddr UnnamedAddr;
+  if (ParseOptionalLinkage(Linkage, HasLinkage, Visibility, DLLStorageClass) ||
+      ParseOptionalThreadLocal(TLM) || ParseOptionalUnnamedAddr(UnnamedAddr))
+    return true;
+
+  if (Lex.getKind() != lltok::kw_alias && Lex.getKind() != lltok::kw_ifunc)
+    return ParseGlobal(Name, NameLoc, Linkage, HasLinkage, Visibility,
+                       DLLStorageClass, TLM, UnnamedAddr);
+
+  return parseIndirectSymbol(Name, NameLoc, Linkage, Visibility,
+                             DLLStorageClass, TLM, UnnamedAddr);
+}
+
+/// ParseNamedGlobal:
+///   GlobalVar '=' OptionalVisibility (ALIAS | IFUNC) ...
+///   GlobalVar '=' OptionalLinkage OptionalVisibility OptionalDLLStorageClass
+///                                                     ...   -> global variable
+bool LLParser::ParseNamedGlobal() {
+  assert(Lex.getKind() == lltok::GlobalVar);
+  LocTy NameLoc = Lex.getLoc();
+  std::string Name = Lex.getStrVal();
+  Lex.Lex();
+
+  bool HasLinkage;
+  unsigned Linkage, Visibility, DLLStorageClass;
+  GlobalVariable::ThreadLocalMode TLM;
+  GlobalVariable::UnnamedAddr UnnamedAddr;
+  if (ParseToken(lltok::equal, "expected '=' in global variable") ||
+      ParseOptionalLinkage(Linkage, HasLinkage, Visibility, DLLStorageClass) ||
+      ParseOptionalThreadLocal(TLM) || ParseOptionalUnnamedAddr(UnnamedAddr))
+    return true;
+
+  if (Lex.getKind() != lltok::kw_alias && Lex.getKind() != lltok::kw_ifunc)
+    return ParseGlobal(Name, NameLoc, Linkage, HasLinkage, Visibility,
+                       DLLStorageClass, TLM, UnnamedAddr);
+
+  return parseIndirectSymbol(Name, NameLoc, Linkage, Visibility,
+                             DLLStorageClass, TLM, UnnamedAddr);
+}
+
+bool LLParser::parseComdat() {
+  assert(Lex.getKind() == lltok::ComdatVar);
+  std::string Name = Lex.getStrVal();
+  LocTy NameLoc = Lex.getLoc();
+  Lex.Lex();
+
+  if (ParseToken(lltok::equal, "expected '=' here"))
+    return true;
+
+  if (ParseToken(lltok::kw_comdat, "expected comdat keyword"))
+    return TokError("expected comdat type");
+
+  Comdat::SelectionKind SK;
+  switch (Lex.getKind()) {
+  default:
+    return TokError("unknown selection kind");
+  case lltok::kw_any:
+    SK = Comdat::Any;
+    break;
+  case lltok::kw_exactmatch:
+    SK = Comdat::ExactMatch;
+    break;
+  case lltok::kw_largest:
+    SK = Comdat::Largest;
+    break;
+  case lltok::kw_noduplicates:
+    SK = Comdat::NoDuplicates;
+    break;
+  case lltok::kw_samesize:
+    SK = Comdat::SameSize;
+    break;
+  }
+  Lex.Lex();
+
+  // See if the comdat was forward referenced, if so, use the comdat.
+  Module::ComdatSymTabType &ComdatSymTab = M->getComdatSymbolTable();
+  Module::ComdatSymTabType::iterator I = ComdatSymTab.find(Name);
+  if (I != ComdatSymTab.end() && !ForwardRefComdats.erase(Name))
+    return Error(NameLoc, "redefinition of comdat '$" + Name + "'");
+
+  Comdat *C;
+  if (I != ComdatSymTab.end())
+    C = &I->second;
+  else
+    C = M->getOrInsertComdat(Name);
+  C->setSelectionKind(SK);
+
+  return false;
+}
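+// For example, "$foo = comdat any" defines a comdat that a global can then
+// reference as "@g = global i32 0, comdat($foo)"; $foo and @g are
+// illustrative names.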
+
+// MDString:
+//   ::= '!' STRINGCONSTANT
+bool LLParser::ParseMDString(MDString *&Result) {
+  std::string Str;
+  if (ParseStringConstant(Str)) return true;
+  Result = MDString::get(Context, Str);
+  return false;
+}
+
+// MDNode:
+//   ::= '!' MDNodeNumber
+bool LLParser::ParseMDNodeID(MDNode *&Result) {
+  // !{ ..., !42, ... }
+  LocTy IDLoc = Lex.getLoc();
+  unsigned MID = 0;
+  if (ParseUInt32(MID))
+    return true;
+
+  // If not a forward reference, just return it now.
+  if (NumberedMetadata.count(MID)) {
+    Result = NumberedMetadata[MID];
+    return false;
+  }
+
+  // Otherwise, create MDNode forward reference.
+  auto &FwdRef = ForwardRefMDNodes[MID];
+  FwdRef = std::make_pair(MDTuple::getTemporary(Context, None), IDLoc);
+
+  Result = FwdRef.first.get();
+  NumberedMetadata[MID].reset(Result);
+  return false;
+}
+
+/// ParseNamedMetadata:
+///   !foo = !{ !1, !2 }
+bool LLParser::ParseNamedMetadata() {
+  assert(Lex.getKind() == lltok::MetadataVar);
+  std::string Name = Lex.getStrVal();
+  Lex.Lex();
+
+  if (ParseToken(lltok::equal, "expected '=' here") ||
+      ParseToken(lltok::exclaim, "Expected '!' here") ||
+      ParseToken(lltok::lbrace, "Expected '{' here"))
+    return true;
+
+  NamedMDNode *NMD = M->getOrInsertNamedMetadata(Name);
+  if (Lex.getKind() != lltok::rbrace)
+    do {
+      if (ParseToken(lltok::exclaim, "Expected '!' here"))
+        return true;
+
+      MDNode *N = nullptr;
+      if (ParseMDNodeID(N)) return true;
+      NMD->addOperand(N);
+    } while (EatIfPresent(lltok::comma));
+
+  return ParseToken(lltok::rbrace, "expected end of metadata node");
+}
+
+/// ParseStandaloneMetadata:
+///   !42 = !{...}
+bool LLParser::ParseStandaloneMetadata() {
+  assert(Lex.getKind() == lltok::exclaim);
+  Lex.Lex();
+  unsigned MetadataID = 0;
+
+  MDNode *Init;
+  if (ParseUInt32(MetadataID) ||
+      ParseToken(lltok::equal, "expected '=' here"))
+    return true;
+
+  // Detect common error, from old metadata syntax.
+  if (Lex.getKind() == lltok::Type)
+    return TokError("unexpected type in metadata definition");
+
+  bool IsDistinct = EatIfPresent(lltok::kw_distinct);
+  if (Lex.getKind() == lltok::MetadataVar) {
+    if (ParseSpecializedMDNode(Init, IsDistinct))
+      return true;
+  } else if (ParseToken(lltok::exclaim, "Expected '!' here") ||
+             ParseMDTuple(Init, IsDistinct))
+    return true;
+
+  // See if this was forward referenced, if so, handle it.
+  auto FI = ForwardRefMDNodes.find(MetadataID);
+  if (FI != ForwardRefMDNodes.end()) {
+    FI->second.first->replaceAllUsesWith(Init);
+    ForwardRefMDNodes.erase(FI);
+
+    assert(NumberedMetadata[MetadataID] == Init && "Tracking VH didn't work");
+  } else {
+    if (NumberedMetadata.count(MetadataID))
+      return TokError("Metadata id is already used");
+    NumberedMetadata[MetadataID].reset(Init);
+  }
+
+  return false;
+}
+
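+// Local linkages (private and internal) require default visibility; the
+// callers below reject, e.g., an internal symbol declared with hidden
+// visibility.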
+static bool isValidVisibilityForLinkage(unsigned V, unsigned L) {
+  return !GlobalValue::isLocalLinkage((GlobalValue::LinkageTypes)L) ||
+         (GlobalValue::VisibilityTypes)V == GlobalValue::DefaultVisibility;
+}
+
+/// parseIndirectSymbol:
+///   ::= GlobalVar '=' OptionalLinkage OptionalVisibility
+///                     OptionalDLLStorageClass OptionalThreadLocal
+///                     OptionalUnnamedAddr 'alias|ifunc' IndirectSymbol
+///
+/// IndirectSymbol
+///   ::= TypeAndValue
+///
+/// Everything through OptionalUnnamedAddr has already been parsed.
+///
+bool LLParser::parseIndirectSymbol(
+    const std::string &Name, LocTy NameLoc, unsigned L, unsigned Visibility,
+    unsigned DLLStorageClass, GlobalVariable::ThreadLocalMode TLM,
+    GlobalVariable::UnnamedAddr UnnamedAddr) {
+  bool IsAlias;
+  if (Lex.getKind() == lltok::kw_alias)
+    IsAlias = true;
+  else if (Lex.getKind() == lltok::kw_ifunc)
+    IsAlias = false;
+  else
+    llvm_unreachable("Not an alias or ifunc!");
+  Lex.Lex();
+
+  GlobalValue::LinkageTypes Linkage = (GlobalValue::LinkageTypes) L;
+
+  if(IsAlias && !GlobalAlias::isValidLinkage(Linkage))
+    return Error(NameLoc, "invalid linkage type for alias");
+
+  if (!isValidVisibilityForLinkage(Visibility, L))
+    return Error(NameLoc,
+                 "symbol with local linkage must have default visibility");
+
+  Type *Ty;
+  LocTy ExplicitTypeLoc = Lex.getLoc();
+  if (ParseType(Ty) ||
+      ParseToken(lltok::comma, "expected comma after alias or ifunc's type"))
+    return true;
+
+  Constant *Aliasee;
+  LocTy AliaseeLoc = Lex.getLoc();
+  if (Lex.getKind() != lltok::kw_bitcast &&
+      Lex.getKind() != lltok::kw_getelementptr &&
+      Lex.getKind() != lltok::kw_addrspacecast &&
+      Lex.getKind() != lltok::kw_inttoptr) {
+    if (ParseGlobalTypeAndValue(Aliasee))
+      return true;
+  } else {
+    // No explicit type is written before a constant-expression aliasee; the
+    // expression itself determines its type.
+    ValID ID;
+    if (ParseValID(ID))
+      return true;
+    if (ID.Kind != ValID::t_Constant)
+      return Error(AliaseeLoc, "invalid aliasee");
+    Aliasee = ID.ConstantVal;
+  }
+
+  Type *AliaseeType = Aliasee->getType();
+  auto *PTy = dyn_cast<PointerType>(AliaseeType);
+  if (!PTy)
+    return Error(AliaseeLoc, "An alias or ifunc must have pointer type");
+  unsigned AddrSpace = PTy->getAddressSpace();
+
+  if (IsAlias && Ty != PTy->getElementType())
+    return Error(
+        ExplicitTypeLoc,
+        "explicit pointee type doesn't match operand's pointee type");
+
+  if (!IsAlias && !PTy->getElementType()->isFunctionTy())
+    return Error(
+        ExplicitTypeLoc,
+        "explicit pointee type should be a function type");
+
+  GlobalValue *GVal = nullptr;
+
+  // See if the alias was forward referenced, if so, prepare to replace the
+  // forward reference.
+  if (!Name.empty()) {
+    GVal = M->getNamedValue(Name);
+    if (GVal) {
+      if (!ForwardRefVals.erase(Name))
+        return Error(NameLoc, "redefinition of global '@" + Name + "'");
+    }
+  } else {
+    auto I = ForwardRefValIDs.find(NumberedVals.size());
+    if (I != ForwardRefValIDs.end()) {
+      GVal = I->second.first;
+      ForwardRefValIDs.erase(I);
+    }
+  }
+
+  // Okay, create the alias but do not insert it into the module yet.
+  std::unique_ptr<GlobalIndirectSymbol> GA;
+  if (IsAlias)
+    GA.reset(GlobalAlias::create(Ty, AddrSpace,
+                                 (GlobalValue::LinkageTypes)Linkage, Name,
+                                 Aliasee, /*Parent*/ nullptr));
+  else
+    GA.reset(GlobalIFunc::create(Ty, AddrSpace,
+                                 (GlobalValue::LinkageTypes)Linkage, Name,
+                                 Aliasee, /*Parent*/ nullptr));
+  GA->setThreadLocalMode(TLM);
+  GA->setVisibility((GlobalValue::VisibilityTypes)Visibility);
+  GA->setDLLStorageClass((GlobalValue::DLLStorageClassTypes)DLLStorageClass);
+  GA->setUnnamedAddr(UnnamedAddr);
+
+  if (Name.empty())
+    NumberedVals.push_back(GA.get());
+
+  if (GVal) {
+    // Verify that types agree.
+    if (GVal->getType() != GA->getType())
+      return Error(
+          ExplicitTypeLoc,
+          "forward reference and definition of alias have different types");
+
+    // If they agree, just RAUW the old value with the alias and remove the
+    // forward ref info.
+    GVal->replaceAllUsesWith(GA.get());
+    GVal->eraseFromParent();
+  }
+
+  // Insert into the module, we know its name won't collide now.
+  if (IsAlias)
+    M->getAliasList().push_back(cast<GlobalAlias>(GA.get()));
+  else
+    M->getIFuncList().push_back(cast<GlobalIFunc>(GA.get()));
+  assert(GA->getName() == Name && "Should not be a name conflict!");
+
+  // The module owns this now
+  GA.release();
+
+  return false;
+}
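+// An illustrative alias accepted by the routine above (names hypothetical):
+//   @g = global i32 0
+//   @a = alias i32, i32* @g
+// where the explicit "i32" must match the pointee type of the aliasee.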
+
+/// ParseGlobal
+///   ::= GlobalVar '=' OptionalLinkage OptionalVisibility OptionalDLLStorageClass
+///       OptionalThreadLocal OptionalUnnamedAddr OptionalAddrSpace
+///       OptionalExternallyInitialized GlobalType Type Const
+///   ::= OptionalLinkage OptionalVisibility OptionalDLLStorageClass
+///       OptionalThreadLocal OptionalUnnamedAddr OptionalAddrSpace
+///       OptionalExternallyInitialized GlobalType Type Const
+///
+/// Everything up to and including OptionalUnnamedAddr has been parsed
+/// already.
+///
+bool LLParser::ParseGlobal(const std::string &Name, LocTy NameLoc,
+                           unsigned Linkage, bool HasLinkage,
+                           unsigned Visibility, unsigned DLLStorageClass,
+                           GlobalVariable::ThreadLocalMode TLM,
+                           GlobalVariable::UnnamedAddr UnnamedAddr) {
+  if (!isValidVisibilityForLinkage(Visibility, Linkage))
+    return Error(NameLoc,
+                 "symbol with local linkage must have default visibility");
+
+  unsigned AddrSpace;
+  bool IsConstant, IsExternallyInitialized;
+  LocTy IsExternallyInitializedLoc;
+  LocTy TyLoc;
+
+  Type *Ty = nullptr;
+  if (ParseOptionalAddrSpace(AddrSpace) ||
+      ParseOptionalToken(lltok::kw_externally_initialized,
+                         IsExternallyInitialized,
+                         &IsExternallyInitializedLoc) ||
+      ParseGlobalType(IsConstant) ||
+      ParseType(Ty, TyLoc))
+    return true;
+
+  // If the linkage is specified and is external, then no initializer is
+  // present.
+  Constant *Init = nullptr;
+  if (!HasLinkage ||
+      !GlobalValue::isValidDeclarationLinkage(
+          (GlobalValue::LinkageTypes)Linkage)) {
+    if (ParseGlobalValue(Ty, Init))
+      return true;
+  }
+
+  if (Ty->isFunctionTy() || !PointerType::isValidElementType(Ty))
+    return Error(TyLoc, "invalid type for global variable");
+
+  GlobalValue *GVal = nullptr;
+
+  // See if the global was forward referenced, if so, use the global.
+  if (!Name.empty()) {
+    GVal = M->getNamedValue(Name);
+    if (GVal) {
+      if (!ForwardRefVals.erase(Name))
+        return Error(NameLoc, "redefinition of global '@" + Name + "'");
+    }
+  } else {
+    auto I = ForwardRefValIDs.find(NumberedVals.size());
+    if (I != ForwardRefValIDs.end()) {
+      GVal = I->second.first;
+      ForwardRefValIDs.erase(I);
+    }
+  }
+
+  GlobalVariable *GV;
+  if (!GVal) {
+    GV = new GlobalVariable(*M, Ty, false, GlobalValue::ExternalLinkage, nullptr,
+                            Name, nullptr, GlobalVariable::NotThreadLocal,
+                            AddrSpace);
+  } else {
+    if (GVal->getValueType() != Ty)
+      return Error(TyLoc,
+            "forward reference and definition of global have different types");
+
+    GV = cast<GlobalVariable>(GVal);
+
+    // Move the forward-reference to the correct spot in the module.
+    M->getGlobalList().splice(M->global_end(), M->getGlobalList(), GV);
+  }
+
+  if (Name.empty())
+    NumberedVals.push_back(GV);
+
+  // Set the parsed properties on the global.
+  if (Init)
+    GV->setInitializer(Init);
+  GV->setConstant(IsConstant);
+  GV->setLinkage((GlobalValue::LinkageTypes)Linkage);
+  GV->setVisibility((GlobalValue::VisibilityTypes)Visibility);
+  GV->setDLLStorageClass((GlobalValue::DLLStorageClassTypes)DLLStorageClass);
+  GV->setExternallyInitialized(IsExternallyInitialized);
+  GV->setThreadLocalMode(TLM);
+  GV->setUnnamedAddr(UnnamedAddr);
+
+  // Parse attributes on the global.
+  while (Lex.getKind() == lltok::comma) {
+    Lex.Lex();
+
+    if (Lex.getKind() == lltok::kw_section) {
+      Lex.Lex();
+      GV->setSection(Lex.getStrVal());
+      if (ParseToken(lltok::StringConstant, "expected global section string"))
+        return true;
+    } else if (Lex.getKind() == lltok::kw_align) {
+      unsigned Alignment;
+      if (ParseOptionalAlignment(Alignment)) return true;
+      GV->setAlignment(Alignment);
+    } else if (Lex.getKind() == lltok::MetadataVar) {
+      if (ParseGlobalObjectMetadataAttachment(*GV))
+        return true;
+    } else {
+      Comdat *C;
+      if (parseOptionalComdat(Name, C))
+        return true;
+      if (C)
+        GV->setComdat(C);
+      else
+        return TokError("unknown global variable property!");
+    }
+  }
+
+  return false;
+}
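+// An illustrative definition exercising the trailing attribute loop above
+// (the name and section are hypothetical):
+//   @counter = internal global i32 0, section ".mydata", align 4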
+
+/// ParseUnnamedAttrGrp
+///   ::= 'attributes' AttrGrpID '=' '{' AttrValPair+ '}'
+bool LLParser::ParseUnnamedAttrGrp() {
+  assert(Lex.getKind() == lltok::kw_attributes);
+  LocTy AttrGrpLoc = Lex.getLoc();
+  Lex.Lex();
+
+  if (Lex.getKind() != lltok::AttrGrpID)
+    return TokError("expected attribute group id");
+
+  unsigned VarID = Lex.getUIntVal();
+  std::vector<unsigned> unused;
+  LocTy BuiltinLoc;
+  Lex.Lex();
+
+  if (ParseToken(lltok::equal, "expected '=' here") ||
+      ParseToken(lltok::lbrace, "expected '{' here") ||
+      ParseFnAttributeValuePairs(NumberedAttrBuilders[VarID], unused, true,
+                                 BuiltinLoc) ||
+      ParseToken(lltok::rbrace, "expected end of attribute group"))
+    return true;
+
+  if (!NumberedAttrBuilders[VarID].hasAttributes())
+    return Error(AttrGrpLoc, "attribute group has no attributes");
+
+  return false;
+}
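+// For example (illustrative function name, group number, and string
+// attribute):
+//   attributes #0 = { nounwind readnone "my-string-attr"="value" }
+//   define void @foo() #0 { ret void }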
+
+/// ParseFnAttributeValuePairs
+///   ::= <attr> | <attr> '=' <value>
+bool LLParser::ParseFnAttributeValuePairs(AttrBuilder &B,
+                                          std::vector<unsigned> &FwdRefAttrGrps,
+                                          bool inAttrGrp, LocTy &BuiltinLoc) {
+  bool HaveError = false;
+
+  B.clear();
+
+  while (true) {
+    lltok::Kind Token = Lex.getKind();
+    if (Token == lltok::kw_builtin)
+      BuiltinLoc = Lex.getLoc();
+    switch (Token) {
+    default:
+      if (!inAttrGrp) return HaveError;
+      return Error(Lex.getLoc(), "unterminated attribute group");
+    case lltok::rbrace:
+      // Finished.
+      return false;
+
+    case lltok::AttrGrpID: {
+      // Allow a function to reference an attribute group:
+      //
+      //   define void @foo() #1 { ... }
+      if (inAttrGrp)
+        HaveError |=
+          Error(Lex.getLoc(),
+              "cannot have an attribute group reference in an attribute group");
+
+      unsigned AttrGrpNum = Lex.getUIntVal();
+      if (inAttrGrp) break;
+
+      // Save the reference to the attribute group. We'll fill it in later.
+      FwdRefAttrGrps.push_back(AttrGrpNum);
+      break;
+    }
+    // Target-dependent attributes:
+    case lltok::StringConstant: {
+      if (ParseStringAttribute(B))
+        return true;
+      continue;
+    }
+
+    // Target-independent attributes:
+    case lltok::kw_align: {
+      // As a hack, we allow function alignment to be initially parsed as an
+      // attribute on a function declaration/definition or added to an attribute
+      // group and later moved to the alignment field.
+      unsigned Alignment;
+      if (inAttrGrp) {
+        Lex.Lex();
+        if (ParseToken(lltok::equal, "expected '=' here") ||
+            ParseUInt32(Alignment))
+          return true;
+      } else {
+        if (ParseOptionalAlignment(Alignment))
+          return true;
+      }
+      B.addAlignmentAttr(Alignment);
+      continue;
+    }
+    case lltok::kw_alignstack: {
+      unsigned Alignment;
+      if (inAttrGrp) {
+        Lex.Lex();
+        if (ParseToken(lltok::equal, "expected '=' here") ||
+            ParseUInt32(Alignment))
+          return true;
+      } else {
+        if (ParseOptionalStackAlignment(Alignment))
+          return true;
+      }
+      B.addStackAlignmentAttr(Alignment);
+      continue;
+    }
+    case lltok::kw_allocsize: {
+      unsigned ElemSizeArg;
+      Optional<unsigned> NumElemsArg;
+      // inAttrGrp doesn't matter; we only support allocsize(a[, b])
+      if (parseAllocSizeArguments(ElemSizeArg, NumElemsArg))
+        return true;
+      B.addAllocSizeAttr(ElemSizeArg, NumElemsArg);
+      continue;
+    }
+    case lltok::kw_alwaysinline: B.addAttribute(Attribute::AlwaysInline); break;
+    case lltok::kw_argmemonly: B.addAttribute(Attribute::ArgMemOnly); break;
+    case lltok::kw_builtin: B.addAttribute(Attribute::Builtin); break;
+    case lltok::kw_cold: B.addAttribute(Attribute::Cold); break;
+    case lltok::kw_convergent: B.addAttribute(Attribute::Convergent); break;
+    case lltok::kw_inaccessiblememonly:
+      B.addAttribute(Attribute::InaccessibleMemOnly); break;
+    case lltok::kw_inaccessiblemem_or_argmemonly:
+      B.addAttribute(Attribute::InaccessibleMemOrArgMemOnly); break;
+    case lltok::kw_inlinehint: B.addAttribute(Attribute::InlineHint); break;
+    case lltok::kw_jumptable: B.addAttribute(Attribute::JumpTable); break;
+    case lltok::kw_minsize: B.addAttribute(Attribute::MinSize); break;
+    case lltok::kw_naked: B.addAttribute(Attribute::Naked); break;
+    case lltok::kw_nobuiltin: B.addAttribute(Attribute::NoBuiltin); break;
+    case lltok::kw_noduplicate: B.addAttribute(Attribute::NoDuplicate); break;
+    case lltok::kw_noimplicitfloat:
+      B.addAttribute(Attribute::NoImplicitFloat); break;
+    case lltok::kw_noinline: B.addAttribute(Attribute::NoInline); break;
+    case lltok::kw_nonlazybind: B.addAttribute(Attribute::NonLazyBind); break;
+    case lltok::kw_noredzone: B.addAttribute(Attribute::NoRedZone); break;
+    case lltok::kw_noreturn: B.addAttribute(Attribute::NoReturn); break;
+    case lltok::kw_norecurse: B.addAttribute(Attribute::NoRecurse); break;
+    case lltok::kw_nounwind: B.addAttribute(Attribute::NoUnwind); break;
+    case lltok::kw_optnone: B.addAttribute(Attribute::OptimizeNone); break;
+    case lltok::kw_optsize: B.addAttribute(Attribute::OptimizeForSize); break;
+    case lltok::kw_readnone: B.addAttribute(Attribute::ReadNone); break;
+    case lltok::kw_readonly: B.addAttribute(Attribute::ReadOnly); break;
+    case lltok::kw_returns_twice:
+      B.addAttribute(Attribute::ReturnsTwice); break;
+    case lltok::kw_ssp: B.addAttribute(Attribute::StackProtect); break;
+    case lltok::kw_sspreq: B.addAttribute(Attribute::StackProtectReq); break;
+    case lltok::kw_sspstrong:
+      B.addAttribute(Attribute::StackProtectStrong); break;
+    case lltok::kw_safestack: B.addAttribute(Attribute::SafeStack); break;
+    case lltok::kw_sanitize_address:
+      B.addAttribute(Attribute::SanitizeAddress); break;
+    case lltok::kw_sanitize_thread:
+      B.addAttribute(Attribute::SanitizeThread); break;
+    case lltok::kw_sanitize_memory:
+      B.addAttribute(Attribute::SanitizeMemory); break;
+    case lltok::kw_uwtable: B.addAttribute(Attribute::UWTable); break;
+    case lltok::kw_writeonly: B.addAttribute(Attribute::WriteOnly); break;
+
+    // Error handling.
+    case lltok::kw_inreg:
+    case lltok::kw_signext:
+    case lltok::kw_zeroext:
+      HaveError |=
+        Error(Lex.getLoc(),
+              "invalid use of attribute on a function");
+      break;
+    case lltok::kw_byval:
+    case lltok::kw_dereferenceable:
+    case lltok::kw_dereferenceable_or_null:
+    case lltok::kw_inalloca:
+    case lltok::kw_nest:
+    case lltok::kw_noalias:
+    case lltok::kw_nocapture:
+    case lltok::kw_nonnull:
+    case lltok::kw_returned:
+    case lltok::kw_sret:
+    case lltok::kw_swifterror:
+    case lltok::kw_swiftself:
+    // VISC parameter-only attributes
+    case lltok::kw_in:
+    case lltok::kw_out:
+    case lltok::kw_inout:
+      HaveError |=
+        Error(Lex.getLoc(),
+              "invalid use of parameter-only attribute on a function");
+      break;
+    }
+
+    Lex.Lex();
+  }
+}
+
+//===----------------------------------------------------------------------===//
+// GlobalValue Reference/Resolution Routines.
+//===----------------------------------------------------------------------===//
+
+static inline GlobalValue *createGlobalFwdRef(Module *M, PointerType *PTy,
+                                              const std::string &Name) {
+  if (auto *FT = dyn_cast<FunctionType>(PTy->getElementType()))
+    return Function::Create(FT, GlobalValue::ExternalWeakLinkage, Name, M);
+  else
+    return new GlobalVariable(*M, PTy->getElementType(), false,
+                              GlobalValue::ExternalWeakLinkage, nullptr, Name,
+                              nullptr, GlobalVariable::NotThreadLocal,
+                              PTy->getAddressSpace());
+}
+
+/// GetGlobalVal - Get a value with the specified name or ID, creating a
+/// forward reference record if needed.  This can return null if the value
+/// exists but does not have the right type.
+GlobalValue *LLParser::GetGlobalVal(const std::string &Name, Type *Ty,
+                                    LocTy Loc) {
+  PointerType *PTy = dyn_cast<PointerType>(Ty);
+  if (!PTy) {
+    Error(Loc, "global variable reference must have pointer type");
+    return nullptr;
+  }
+
+  // Look this name up in the normal function symbol table.
+  GlobalValue *Val =
+    cast_or_null<GlobalValue>(M->getValueSymbolTable().lookup(Name));
+
+  // If this is a forward reference for the value, see if we already created a
+  // forward ref record.
+  if (!Val) {
+    auto I = ForwardRefVals.find(Name);
+    if (I != ForwardRefVals.end())
+      Val = I->second.first;
+  }
+
+  // If we have the value in the symbol table or fwd-ref table, return it.
+  if (Val) {
+    if (Val->getType() == Ty) return Val;
+    Error(Loc, "'@" + Name + "' defined with type '" +
+          getTypeString(Val->getType()) + "'");
+    return nullptr;
+  }
+
+  // Otherwise, create a new forward reference for this value and remember it.
+  GlobalValue *FwdVal = createGlobalFwdRef(M, PTy, Name);
+  ForwardRefVals[Name] = std::make_pair(FwdVal, Loc);
+  return FwdVal;
+}
+
+GlobalValue *LLParser::GetGlobalVal(unsigned ID, Type *Ty, LocTy Loc) {
+  PointerType *PTy = dyn_cast<PointerType>(Ty);
+  if (!PTy) {
+    Error(Loc, "global variable reference must have pointer type");
+    return nullptr;
+  }
+
+  GlobalValue *Val = ID < NumberedVals.size() ? NumberedVals[ID] : nullptr;
+
+  // If this is a forward reference for the value, see if we already created a
+  // forward ref record.
+  if (!Val) {
+    auto I = ForwardRefValIDs.find(ID);
+    if (I != ForwardRefValIDs.end())
+      Val = I->second.first;
+  }
+
+  // If we have the value in the symbol table or fwd-ref table, return it.
+  if (Val) {
+    if (Val->getType() == Ty) return Val;
+    Error(Loc, "'@" + Twine(ID) + "' defined with type '" +
+          getTypeString(Val->getType()) + "'");
+    return nullptr;
+  }
+
+  // Otherwise, create a new forward reference for this value and remember it.
+  GlobalValue *FwdVal = createGlobalFwdRef(M, PTy, "");
+  ForwardRefValIDs[ID] = std::make_pair(FwdVal, Loc);
+  return FwdVal;
+}
+
+//===----------------------------------------------------------------------===//
+// Comdat Reference/Resolution Routines.
+//===----------------------------------------------------------------------===//
+
+Comdat *LLParser::getComdat(const std::string &Name, LocTy Loc) {
+  // Look this name up in the comdat symbol table.
+  Module::ComdatSymTabType &ComdatSymTab = M->getComdatSymbolTable();
+  Module::ComdatSymTabType::iterator I = ComdatSymTab.find(Name);
+  if (I != ComdatSymTab.end())
+    return &I->second;
+
+  // Otherwise, create a new forward reference for this value and remember it.
+  Comdat *C = M->getOrInsertComdat(Name);
+  ForwardRefComdats[Name] = Loc;
+  return C;
+}
+
+//===----------------------------------------------------------------------===//
+// Helper Routines.
+//===----------------------------------------------------------------------===//
+
+/// ParseToken - If the current token has the specified kind, eat it and return
+/// success.  Otherwise, emit the specified error and return failure.
+bool LLParser::ParseToken(lltok::Kind T, const char *ErrMsg) {
+  if (Lex.getKind() != T)
+    return TokError(ErrMsg);
+  Lex.Lex();
+  return false;
+}
+
+/// ParseStringConstant
+///   ::= StringConstant
+bool LLParser::ParseStringConstant(std::string &Result) {
+  if (Lex.getKind() != lltok::StringConstant)
+    return TokError("expected string constant");
+  Result = Lex.getStrVal();
+  Lex.Lex();
+  return false;
+}
+
+/// ParseUInt32
+///   ::= uint32
+bool LLParser::ParseUInt32(uint32_t &Val) {
+  if (Lex.getKind() != lltok::APSInt || Lex.getAPSIntVal().isSigned())
+    return TokError("expected integer");
+  uint64_t Val64 = Lex.getAPSIntVal().getLimitedValue(0xFFFFFFFFULL+1);
+  if (Val64 != unsigned(Val64))
+    return TokError("expected 32-bit integer (too large)");
+  Val = Val64;
+  Lex.Lex();
+  return false;
+}
+
+/// ParseUInt64
+///   ::= uint64
+bool LLParser::ParseUInt64(uint64_t &Val) {
+  if (Lex.getKind() != lltok::APSInt || Lex.getAPSIntVal().isSigned())
+    return TokError("expected integer");
+  Val = Lex.getAPSIntVal().getLimitedValue();
+  Lex.Lex();
+  return false;
+}
+
+/// ParseTLSModel
+///   := 'localdynamic'
+///   := 'initialexec'
+///   := 'localexec'
+bool LLParser::ParseTLSModel(GlobalVariable::ThreadLocalMode &TLM) {
+  switch (Lex.getKind()) {
+    default:
+      return TokError("expected localdynamic, initialexec or localexec");
+    case lltok::kw_localdynamic:
+      TLM = GlobalVariable::LocalDynamicTLSModel;
+      break;
+    case lltok::kw_initialexec:
+      TLM = GlobalVariable::InitialExecTLSModel;
+      break;
+    case lltok::kw_localexec:
+      TLM = GlobalVariable::LocalExecTLSModel;
+      break;
+  }
+
+  Lex.Lex();
+  return false;
+}
+
+/// ParseOptionalThreadLocal
+///   := /*empty*/
+///   := 'thread_local'
+///   := 'thread_local' '(' tlsmodel ')'
+bool LLParser::ParseOptionalThreadLocal(GlobalVariable::ThreadLocalMode &TLM) {
+  TLM = GlobalVariable::NotThreadLocal;
+  if (!EatIfPresent(lltok::kw_thread_local))
+    return false;
+
+  TLM = GlobalVariable::GeneralDynamicTLSModel;
+  if (Lex.getKind() == lltok::lparen) {
+    Lex.Lex();
+    return ParseTLSModel(TLM) ||
+      ParseToken(lltok::rparen, "expected ')' after thread local model");
+  }
+  return false;
+}
+
+/// ParseOptionalAddrSpace
+///   := /*empty*/
+///   := 'addrspace' '(' uint32 ')'
+bool LLParser::ParseOptionalAddrSpace(unsigned &AddrSpace) {
+  AddrSpace = 0;
+  if (!EatIfPresent(lltok::kw_addrspace))
+    return false;
+  return ParseToken(lltok::lparen, "expected '(' in address space") ||
+         ParseUInt32(AddrSpace) ||
+         ParseToken(lltok::rparen, "expected ')' in address space");
+}
+
+/// ParseStringAttribute
+///   := StringConstant
+///   := StringConstant '=' StringConstant
+bool LLParser::ParseStringAttribute(AttrBuilder &B) {
+  std::string Attr = Lex.getStrVal();
+  Lex.Lex();
+  std::string Val;
+  if (EatIfPresent(lltok::equal) && ParseStringConstant(Val))
+    return true;
+  B.addAttribute(Attr, Val);
+  return false;
+}
+
+/// ParseOptionalParamAttrs - Parse a potentially empty list of parameter attributes.
+bool LLParser::ParseOptionalParamAttrs(AttrBuilder &B) {
+  bool HaveError = false;
+
+  B.clear();
+
+  while (true) {
+    lltok::Kind Token = Lex.getKind();
+    switch (Token) {
+    default:  // End of attributes.
+      return HaveError;
+    case lltok::StringConstant: {
+      if (ParseStringAttribute(B))
+        return true;
+      continue;
+    }
+    case lltok::kw_align: {
+      unsigned Alignment;
+      if (ParseOptionalAlignment(Alignment))
+        return true;
+      B.addAlignmentAttr(Alignment);
+      continue;
+    }
+    case lltok::kw_byval:           B.addAttribute(Attribute::ByVal); break;
+    case lltok::kw_dereferenceable: {
+      uint64_t Bytes;
+      if (ParseOptionalDerefAttrBytes(lltok::kw_dereferenceable, Bytes))
+        return true;
+      B.addDereferenceableAttr(Bytes);
+      continue;
+    }
+    case lltok::kw_dereferenceable_or_null: {
+      uint64_t Bytes;
+      if (ParseOptionalDerefAttrBytes(lltok::kw_dereferenceable_or_null, Bytes))
+        return true;
+      B.addDereferenceableOrNullAttr(Bytes);
+      continue;
+    }
+    case lltok::kw_inalloca:        B.addAttribute(Attribute::InAlloca); break;
+    case lltok::kw_inreg:           B.addAttribute(Attribute::InReg); break;
+    case lltok::kw_nest:            B.addAttribute(Attribute::Nest); break;
+    case lltok::kw_noalias:         B.addAttribute(Attribute::NoAlias); break;
+    case lltok::kw_nocapture:       B.addAttribute(Attribute::NoCapture); break;
+    case lltok::kw_nonnull:         B.addAttribute(Attribute::NonNull); break;
+    case lltok::kw_readnone:        B.addAttribute(Attribute::ReadNone); break;
+    case lltok::kw_readonly:        B.addAttribute(Attribute::ReadOnly); break;
+    case lltok::kw_returned:        B.addAttribute(Attribute::Returned); break;
+    case lltok::kw_signext:         B.addAttribute(Attribute::SExt); break;
+    case lltok::kw_sret:            B.addAttribute(Attribute::StructRet); break;
+    case lltok::kw_swifterror:      B.addAttribute(Attribute::SwiftError); break;
+    case lltok::kw_swiftself:       B.addAttribute(Attribute::SwiftSelf); break;
+    case lltok::kw_writeonly:       B.addAttribute(Attribute::WriteOnly); break;
+    case lltok::kw_zeroext:         B.addAttribute(Attribute::ZExt); break;
+    // VISC parameter attributes
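+    // (HPVM/VISC-specific: these mark the dataflow direction of a parameter,
+    //  e.g. "i32* in %A" or "float* inout %B" in an argument list.)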
+    case lltok::kw_in:              B.addAttribute(Attribute::In); break;
+    case lltok::kw_out:             B.addAttribute(Attribute::Out); break;
+    case lltok::kw_inout:           B.addAttribute(Attribute::InOut); break;
+
+    case lltok::kw_alignstack:
+    case lltok::kw_alwaysinline:
+    case lltok::kw_argmemonly:
+    case lltok::kw_builtin:
+    case lltok::kw_inlinehint:
+    case lltok::kw_jumptable:
+    case lltok::kw_minsize:
+    case lltok::kw_naked:
+    case lltok::kw_nobuiltin:
+    case lltok::kw_noduplicate:
+    case lltok::kw_noimplicitfloat:
+    case lltok::kw_noinline:
+    case lltok::kw_nonlazybind:
+    case lltok::kw_noredzone:
+    case lltok::kw_noreturn:
+    case lltok::kw_nounwind:
+    case lltok::kw_optnone:
+    case lltok::kw_optsize:
+    case lltok::kw_returns_twice:
+    case lltok::kw_sanitize_address:
+    case lltok::kw_sanitize_memory:
+    case lltok::kw_sanitize_thread:
+    case lltok::kw_ssp:
+    case lltok::kw_sspreq:
+    case lltok::kw_sspstrong:
+    case lltok::kw_safestack:
+    case lltok::kw_uwtable:
+      HaveError |= Error(Lex.getLoc(), "invalid use of function-only attribute");
+      break;
+    }
+
+    Lex.Lex();
+  }
+}
+
+/// ParseOptionalReturnAttrs - Parse a potentially empty list of return attributes.
+bool LLParser::ParseOptionalReturnAttrs(AttrBuilder &B) {
+  bool HaveError = false;
+
+  B.clear();
+
+  while (true) {
+    lltok::Kind Token = Lex.getKind();
+    switch (Token) {
+    default:  // End of attributes.
+      return HaveError;
+    case lltok::StringConstant: {
+      if (ParseStringAttribute(B))
+        return true;
+      continue;
+    }
+    case lltok::kw_dereferenceable: {
+      uint64_t Bytes;
+      if (ParseOptionalDerefAttrBytes(lltok::kw_dereferenceable, Bytes))
+        return true;
+      B.addDereferenceableAttr(Bytes);
+      continue;
+    }
+    case lltok::kw_dereferenceable_or_null: {
+      uint64_t Bytes;
+      if (ParseOptionalDerefAttrBytes(lltok::kw_dereferenceable_or_null, Bytes))
+        return true;
+      B.addDereferenceableOrNullAttr(Bytes);
+      continue;
+    }
+    case lltok::kw_align: {
+      unsigned Alignment;
+      if (ParseOptionalAlignment(Alignment))
+        return true;
+      B.addAlignmentAttr(Alignment);
+      continue;
+    }
+    case lltok::kw_inreg:           B.addAttribute(Attribute::InReg); break;
+    case lltok::kw_noalias:         B.addAttribute(Attribute::NoAlias); break;
+    case lltok::kw_nonnull:         B.addAttribute(Attribute::NonNull); break;
+    case lltok::kw_signext:         B.addAttribute(Attribute::SExt); break;
+    case lltok::kw_zeroext:         B.addAttribute(Attribute::ZExt); break;
+
+    // Error handling.
+    case lltok::kw_byval:
+    case lltok::kw_inalloca:
+    case lltok::kw_nest:
+    case lltok::kw_nocapture:
+    case lltok::kw_returned:
+    case lltok::kw_sret:
+    case lltok::kw_swifterror:
+    case lltok::kw_swiftself:
+    // VISC parameter-only attributes
+    case lltok::kw_in:
+    case lltok::kw_out:
+    case lltok::kw_inout:
+      HaveError |= Error(Lex.getLoc(), "invalid use of parameter-only attribute");
+      break;
+
+    case lltok::kw_alignstack:
+    case lltok::kw_alwaysinline:
+    case lltok::kw_argmemonly:
+    case lltok::kw_builtin:
+    case lltok::kw_cold:
+    case lltok::kw_inlinehint:
+    case lltok::kw_jumptable:
+    case lltok::kw_minsize:
+    case lltok::kw_naked:
+    case lltok::kw_nobuiltin:
+    case lltok::kw_noduplicate:
+    case lltok::kw_noimplicitfloat:
+    case lltok::kw_noinline:
+    case lltok::kw_nonlazybind:
+    case lltok::kw_noredzone:
+    case lltok::kw_noreturn:
+    case lltok::kw_nounwind:
+    case lltok::kw_optnone:
+    case lltok::kw_optsize:
+    case lltok::kw_returns_twice:
+    case lltok::kw_sanitize_address:
+    case lltok::kw_sanitize_memory:
+    case lltok::kw_sanitize_thread:
+    case lltok::kw_ssp:
+    case lltok::kw_sspreq:
+    case lltok::kw_sspstrong:
+    case lltok::kw_safestack:
+    case lltok::kw_uwtable:
+      HaveError |= Error(Lex.getLoc(), "invalid use of function-only attribute");
+      break;
+
+    case lltok::kw_readnone:
+    case lltok::kw_readonly:
+      HaveError |= Error(Lex.getLoc(), "invalid use of attribute on return type");
+      break;
+    }
+
+    Lex.Lex();
+  }
+}
+
+static unsigned parseOptionalLinkageAux(lltok::Kind Kind, bool &HasLinkage) {
+  HasLinkage = true;
+  switch (Kind) {
+  default:
+    HasLinkage = false;
+    return GlobalValue::ExternalLinkage;
+  case lltok::kw_private:
+    return GlobalValue::PrivateLinkage;
+  case lltok::kw_internal:
+    return GlobalValue::InternalLinkage;
+  case lltok::kw_weak:
+    return GlobalValue::WeakAnyLinkage;
+  case lltok::kw_weak_odr:
+    return GlobalValue::WeakODRLinkage;
+  case lltok::kw_linkonce:
+    return GlobalValue::LinkOnceAnyLinkage;
+  case lltok::kw_linkonce_odr:
+    return GlobalValue::LinkOnceODRLinkage;
+  case lltok::kw_available_externally:
+    return GlobalValue::AvailableExternallyLinkage;
+  case lltok::kw_appending:
+    return GlobalValue::AppendingLinkage;
+  case lltok::kw_common:
+    return GlobalValue::CommonLinkage;
+  case lltok::kw_extern_weak:
+    return GlobalValue::ExternalWeakLinkage;
+  case lltok::kw_external:
+    return GlobalValue::ExternalLinkage;
+  }
+}
+
+/// ParseOptionalLinkage
+///   ::= /*empty*/
+///   ::= 'private'
+///   ::= 'internal'
+///   ::= 'weak'
+///   ::= 'weak_odr'
+///   ::= 'linkonce'
+///   ::= 'linkonce_odr'
+///   ::= 'available_externally'
+///   ::= 'appending'
+///   ::= 'common'
+///   ::= 'extern_weak'
+///   ::= 'external'
+bool LLParser::ParseOptionalLinkage(unsigned &Res, bool &HasLinkage,
+                                    unsigned &Visibility,
+                                    unsigned &DLLStorageClass) {
+  Res = parseOptionalLinkageAux(Lex.getKind(), HasLinkage);
+  if (HasLinkage)
+    Lex.Lex();
+  ParseOptionalVisibility(Visibility);
+  ParseOptionalDLLStorageClass(DLLStorageClass);
+  return false;
+}
+
+/// ParseOptionalVisibility
+///   ::= /*empty*/
+///   ::= 'default'
+///   ::= 'hidden'
+///   ::= 'protected'
+///
+void LLParser::ParseOptionalVisibility(unsigned &Res) {
+  switch (Lex.getKind()) {
+  default:
+    Res = GlobalValue::DefaultVisibility;
+    return;
+  case lltok::kw_default:
+    Res = GlobalValue::DefaultVisibility;
+    break;
+  case lltok::kw_hidden:
+    Res = GlobalValue::HiddenVisibility;
+    break;
+  case lltok::kw_protected:
+    Res = GlobalValue::ProtectedVisibility;
+    break;
+  }
+  Lex.Lex();
+}
+
+/// ParseOptionalDLLStorageClass
+///   ::= /*empty*/
+///   ::= 'dllimport'
+///   ::= 'dllexport'
+///
+void LLParser::ParseOptionalDLLStorageClass(unsigned &Res) {
+  switch (Lex.getKind()) {
+  default:
+    Res = GlobalValue::DefaultStorageClass;
+    return;
+  case lltok::kw_dllimport:
+    Res = GlobalValue::DLLImportStorageClass;
+    break;
+  case lltok::kw_dllexport:
+    Res = GlobalValue::DLLExportStorageClass;
+    break;
+  }
+  Lex.Lex();
+}
+
+/// ParseOptionalCallingConv
+///   ::= /*empty*/
+///   ::= 'ccc'
+///   ::= 'fastcc'
+///   ::= 'intel_ocl_bicc'
+///   ::= 'coldcc'
+///   ::= 'x86_stdcallcc'
+///   ::= 'x86_fastcallcc'
+///   ::= 'x86_thiscallcc'
+///   ::= 'x86_vectorcallcc'
+///   ::= 'x86_regcallcc'
+///   ::= 'arm_apcscc'
+///   ::= 'arm_aapcscc'
+///   ::= 'arm_aapcs_vfpcc'
+///   ::= 'msp430_intrcc'
+///   ::= 'avr_intrcc'
+///   ::= 'avr_signalcc'
+///   ::= 'ptx_kernel'
+///   ::= 'ptx_device'
+///   ::= 'spir_func'
+///   ::= 'spir_kernel'
+///   ::= 'x86_64_sysvcc'
+///   ::= 'x86_64_win64cc'
+///   ::= 'webkit_jscc'
+///   ::= 'anyregcc'
+///   ::= 'preserve_mostcc'
+///   ::= 'preserve_allcc'
+///   ::= 'ghccc'
+///   ::= 'swiftcc'
+///   ::= 'x86_intrcc'
+///   ::= 'hhvmcc'
+///   ::= 'hhvm_ccc'
+///   ::= 'cxx_fast_tlscc'
+///   ::= 'amdgpu_vs'
+///   ::= 'amdgpu_gs'
+///   ::= 'amdgpu_ps'
+///   ::= 'amdgpu_cs'
+///   ::= 'amdgpu_kernel'
+///   ::= 'cc' UINT
+///
+bool LLParser::ParseOptionalCallingConv(unsigned &CC) {
+  switch (Lex.getKind()) {
+  default:                       CC = CallingConv::C; return false;
+  case lltok::kw_ccc:            CC = CallingConv::C; break;
+  case lltok::kw_fastcc:         CC = CallingConv::Fast; break;
+  case lltok::kw_coldcc:         CC = CallingConv::Cold; break;
+  case lltok::kw_x86_stdcallcc:  CC = CallingConv::X86_StdCall; break;
+  case lltok::kw_x86_fastcallcc: CC = CallingConv::X86_FastCall; break;
+  case lltok::kw_x86_regcallcc:  CC = CallingConv::X86_RegCall; break;
+  case lltok::kw_x86_thiscallcc: CC = CallingConv::X86_ThisCall; break;
+  case lltok::kw_x86_vectorcallcc:CC = CallingConv::X86_VectorCall; break;
+  case lltok::kw_arm_apcscc:     CC = CallingConv::ARM_APCS; break;
+  case lltok::kw_arm_aapcscc:    CC = CallingConv::ARM_AAPCS; break;
+  case lltok::kw_arm_aapcs_vfpcc:CC = CallingConv::ARM_AAPCS_VFP; break;
+  case lltok::kw_msp430_intrcc:  CC = CallingConv::MSP430_INTR; break;
+  case lltok::kw_avr_intrcc:     CC = CallingConv::AVR_INTR; break;
+  case lltok::kw_avr_signalcc:   CC = CallingConv::AVR_SIGNAL; break;
+  case lltok::kw_ptx_kernel:     CC = CallingConv::PTX_Kernel; break;
+  case lltok::kw_ptx_device:     CC = CallingConv::PTX_Device; break;
+  case lltok::kw_spir_kernel:    CC = CallingConv::SPIR_KERNEL; break;
+  case lltok::kw_spir_func:      CC = CallingConv::SPIR_FUNC; break;
+  case lltok::kw_intel_ocl_bicc: CC = CallingConv::Intel_OCL_BI; break;
+  case lltok::kw_x86_64_sysvcc:  CC = CallingConv::X86_64_SysV; break;
+  case lltok::kw_x86_64_win64cc: CC = CallingConv::X86_64_Win64; break;
+  case lltok::kw_webkit_jscc:    CC = CallingConv::WebKit_JS; break;
+  case lltok::kw_anyregcc:       CC = CallingConv::AnyReg; break;
+  case lltok::kw_preserve_mostcc:CC = CallingConv::PreserveMost; break;
+  case lltok::kw_preserve_allcc: CC = CallingConv::PreserveAll; break;
+  case lltok::kw_ghccc:          CC = CallingConv::GHC; break;
+  case lltok::kw_swiftcc:        CC = CallingConv::Swift; break;
+  case lltok::kw_x86_intrcc:     CC = CallingConv::X86_INTR; break;
+  case lltok::kw_hhvmcc:         CC = CallingConv::HHVM; break;
+  case lltok::kw_hhvm_ccc:       CC = CallingConv::HHVM_C; break;
+  case lltok::kw_cxx_fast_tlscc: CC = CallingConv::CXX_FAST_TLS; break;
+  case lltok::kw_amdgpu_vs:      CC = CallingConv::AMDGPU_VS; break;
+  case lltok::kw_amdgpu_gs:      CC = CallingConv::AMDGPU_GS; break;
+  case lltok::kw_amdgpu_ps:      CC = CallingConv::AMDGPU_PS; break;
+  case lltok::kw_amdgpu_cs:      CC = CallingConv::AMDGPU_CS; break;
+  case lltok::kw_amdgpu_kernel:  CC = CallingConv::AMDGPU_KERNEL; break;
+  case lltok::kw_cc: {
+      Lex.Lex();
+      return ParseUInt32(CC);
+    }
+  }
+
+  Lex.Lex();
+  return false;
+}
+
+/// ParseMetadataAttachment
+///   ::= !dbg !42
+bool LLParser::ParseMetadataAttachment(unsigned &Kind, MDNode *&MD) {
+  assert(Lex.getKind() == lltok::MetadataVar && "Expected metadata attachment");
+
+  std::string Name = Lex.getStrVal();
+  Kind = M->getMDKindID(Name);
+  Lex.Lex();
+
+  return ParseMDNode(MD);
+}
+
+/// ParseInstructionMetadata
+///   ::= !dbg !42 (',' !dbg !57)*
+bool LLParser::ParseInstructionMetadata(Instruction &Inst) {
+  do {
+    if (Lex.getKind() != lltok::MetadataVar)
+      return TokError("expected metadata after comma");
+
+    unsigned MDK;
+    MDNode *N;
+    if (ParseMetadataAttachment(MDK, N))
+      return true;
+
+    Inst.setMetadata(MDK, N);
+    if (MDK == LLVMContext::MD_tbaa)
+      InstsWithTBAATag.push_back(&Inst);
+
+    // If this is the end of the list, we're done.
+  } while (EatIfPresent(lltok::comma));
+  return false;
+}
+
+/// ParseGlobalObjectMetadataAttachment
+///   ::= !dbg !57
+bool LLParser::ParseGlobalObjectMetadataAttachment(GlobalObject &GO) {
+  unsigned MDK;
+  MDNode *N;
+  if (ParseMetadataAttachment(MDK, N))
+    return true;
+
+  GO.addMetadata(MDK, *N);
+  return false;
+}
+
+/// ParseOptionalFunctionMetadata
+///   ::= (!dbg !57)*
+bool LLParser::ParseOptionalFunctionMetadata(Function &F) {
+  while (Lex.getKind() == lltok::MetadataVar)
+    if (ParseGlobalObjectMetadataAttachment(F))
+      return true;
+  return false;
+}
+
+/// ParseOptionalAlignment
+///   ::= /* empty */
+///   ::= 'align' 4
+bool LLParser::ParseOptionalAlignment(unsigned &Alignment) {
+  Alignment = 0;
+  if (!EatIfPresent(lltok::kw_align))
+    return false;
+  LocTy AlignLoc = Lex.getLoc();
+  if (ParseUInt32(Alignment)) return true;
+  if (!isPowerOf2_32(Alignment))
+    return Error(AlignLoc, "alignment is not a power of two");
+  if (Alignment > Value::MaximumAlignment)
+    return Error(AlignLoc, "huge alignments are not supported yet");
+  return false;
+}
+
+/// ParseOptionalDerefAttrBytes
+///   ::= /* empty */
+///   ::= AttrKind '(' 4 ')'
+///
+/// where AttrKind is either 'dereferenceable' or 'dereferenceable_or_null'.
+bool LLParser::ParseOptionalDerefAttrBytes(lltok::Kind AttrKind,
+                                           uint64_t &Bytes) {
+  assert((AttrKind == lltok::kw_dereferenceable ||
+          AttrKind == lltok::kw_dereferenceable_or_null) &&
+         "contract!");
+
+  Bytes = 0;
+  if (!EatIfPresent(AttrKind))
+    return false;
+  LocTy ParenLoc = Lex.getLoc();
+  if (!EatIfPresent(lltok::lparen))
+    return Error(ParenLoc, "expected '('");
+  LocTy DerefLoc = Lex.getLoc();
+  if (ParseUInt64(Bytes)) return true;
+  ParenLoc = Lex.getLoc();
+  if (!EatIfPresent(lltok::rparen))
+    return Error(ParenLoc, "expected ')'");
+  if (!Bytes)
+    return Error(DerefLoc, "dereferenceable bytes must be non-zero");
+  return false;
+}
+
+/// ParseOptionalCommaAlign
+///   ::= /* empty */
+///   ::= ',' align 4
+///
+/// This returns with AteExtraComma set to true if it ate an excess comma at the
+/// end.
+bool LLParser::ParseOptionalCommaAlign(unsigned &Alignment,
+                                       bool &AteExtraComma) {
+  AteExtraComma = false;
+  while (EatIfPresent(lltok::comma)) {
+    // Metadata at the end is an early exit.
+    if (Lex.getKind() == lltok::MetadataVar) {
+      AteExtraComma = true;
+      return false;
+    }
+
+    if (Lex.getKind() != lltok::kw_align)
+      return Error(Lex.getLoc(), "expected metadata or 'align'");
+
+    if (ParseOptionalAlignment(Alignment)) return true;
+  }
+
+  return false;
+}
+
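+/// parseAllocSizeArguments
+///   ::= 'allocsize' '(' uint32 ')'
+///   ::= 'allocsize' '(' uint32 ',' uint32 ')'
+///   e.g. "allocsize(0)" or "allocsize(0, 1)".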
+bool LLParser::parseAllocSizeArguments(unsigned &BaseSizeArg,
+                                       Optional<unsigned> &HowManyArg) {
+  Lex.Lex();
+
+  auto StartParen = Lex.getLoc();
+  if (!EatIfPresent(lltok::lparen))
+    return Error(StartParen, "expected '('");
+
+  if (ParseUInt32(BaseSizeArg))
+    return true;
+
+  if (EatIfPresent(lltok::comma)) {
+    auto HowManyAt = Lex.getLoc();
+    unsigned HowMany;
+    if (ParseUInt32(HowMany))
+      return true;
+    if (HowMany == BaseSizeArg)
+      return Error(HowManyAt,
+                   "'allocsize' indices can't refer to the same parameter");
+    HowManyArg = HowMany;
+  } else
+    HowManyArg = None;
+
+  auto EndParen = Lex.getLoc();
+  if (!EatIfPresent(lltok::rparen))
+    return Error(EndParen, "expected ')'");
+  return false;
+}
+
+/// ParseScopeAndOrdering
+///   if isAtomic: ::= 'singlethread'? AtomicOrdering
+///   else: ::=
+///
+/// This sets Scope and Ordering to the parsed values.
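+/// e.g. the "singlethread seq_cst" suffix of an atomic instruction.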
+bool LLParser::ParseScopeAndOrdering(bool isAtomic, SynchronizationScope &Scope,
+                                     AtomicOrdering &Ordering) {
+  if (!isAtomic)
+    return false;
+
+  Scope = CrossThread;
+  if (EatIfPresent(lltok::kw_singlethread))
+    Scope = SingleThread;
+
+  return ParseOrdering(Ordering);
+}
+
+/// ParseOrdering
+///   ::= AtomicOrdering
+///
+/// This sets Ordering to the parsed value.
+bool LLParser::ParseOrdering(AtomicOrdering &Ordering) {
+  switch (Lex.getKind()) {
+  default: return TokError("Expected ordering on atomic instruction");
+  case lltok::kw_unordered: Ordering = AtomicOrdering::Unordered; break;
+  case lltok::kw_monotonic: Ordering = AtomicOrdering::Monotonic; break;
+  // Not specified yet:
+  // case lltok::kw_consume: Ordering = AtomicOrdering::Consume; break;
+  case lltok::kw_acquire: Ordering = AtomicOrdering::Acquire; break;
+  case lltok::kw_release: Ordering = AtomicOrdering::Release; break;
+  case lltok::kw_acq_rel: Ordering = AtomicOrdering::AcquireRelease; break;
+  case lltok::kw_seq_cst:
+    Ordering = AtomicOrdering::SequentiallyConsistent;
+    break;
+  }
+  Lex.Lex();
+  return false;
+}
+
+/// ParseOptionalStackAlignment
+///   ::= /* empty */
+///   ::= 'alignstack' '(' 4 ')'
+bool LLParser::ParseOptionalStackAlignment(unsigned &Alignment) {
+  Alignment = 0;
+  if (!EatIfPresent(lltok::kw_alignstack))
+    return false;
+  LocTy ParenLoc = Lex.getLoc();
+  if (!EatIfPresent(lltok::lparen))
+    return Error(ParenLoc, "expected '('");
+  LocTy AlignLoc = Lex.getLoc();
+  if (ParseUInt32(Alignment)) return true;
+  ParenLoc = Lex.getLoc();
+  if (!EatIfPresent(lltok::rparen))
+    return Error(ParenLoc, "expected ')'");
+  if (!isPowerOf2_32(Alignment))
+    return Error(AlignLoc, "stack alignment is not a power of two");
+  return false;
+}
+
+/// ParseIndexList - This parses the index list for an insert/extractvalue
+/// instruction.  This sets AteExtraComma in the case where we eat an extra
+/// comma at the end of the line and find that it is followed by metadata.
+/// Clients that don't allow metadata can call the version of this function that
+/// only takes one argument.
+///
+/// ParseIndexList
+///    ::=  (',' uint32)+
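+///    e.g. the ", 1, 0" suffix of "extractvalue {i32, [2 x i32]} %agg, 1, 0".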
+///
+bool LLParser::ParseIndexList(SmallVectorImpl<unsigned> &Indices,
+                              bool &AteExtraComma) {
+  AteExtraComma = false;
+
+  if (Lex.getKind() != lltok::comma)
+    return TokError("expected ',' as start of index list");
+
+  while (EatIfPresent(lltok::comma)) {
+    if (Lex.getKind() == lltok::MetadataVar) {
+      if (Indices.empty()) return TokError("expected index");
+      AteExtraComma = true;
+      return false;
+    }
+    unsigned Idx = 0;
+    if (ParseUInt32(Idx)) return true;
+    Indices.push_back(Idx);
+  }
+
+  return false;
+}
+
+//===----------------------------------------------------------------------===//
+// Type Parsing.
+//===----------------------------------------------------------------------===//
+
+/// ParseType - Parse a type.
+bool LLParser::ParseType(Type *&Result, const Twine &Msg, bool AllowVoid) {
+  SMLoc TypeLoc = Lex.getLoc();
+  switch (Lex.getKind()) {
+  default:
+    return TokError(Msg);
+  case lltok::Type:
+    // Type ::= 'float' | 'void' (etc)
+    Result = Lex.getTyVal();
+    Lex.Lex();
+    break;
+  case lltok::lbrace:
+    // Type ::= StructType
+    if (ParseAnonStructType(Result, false))
+      return true;
+    break;
+  case lltok::lsquare:
+    // Type ::= '[' ... ']'
+    Lex.Lex(); // eat the lsquare.
+    if (ParseArrayVectorType(Result, false))
+      return true;
+    break;
+  case lltok::less: // Either vector or packed struct.
+    // Type ::= '<' ... '>'
+    Lex.Lex();
+    if (Lex.getKind() == lltok::lbrace) {
+      if (ParseAnonStructType(Result, true) ||
+          ParseToken(lltok::greater, "expected '>' at end of packed struct"))
+        return true;
+    } else if (ParseArrayVectorType(Result, true))
+      return true;
+    break;
+  case lltok::LocalVar: {
+    // Type ::= %foo
+    std::pair<Type*, LocTy> &Entry = NamedTypes[Lex.getStrVal()];
+
+    // If the type hasn't been defined yet, create a forward definition and
+    // remember where that forward def'n was seen (in case it is never defined).
+    if (!Entry.first) {
+      Entry.first = StructType::create(Context, Lex.getStrVal());
+      Entry.second = Lex.getLoc();
+    }
+    Result = Entry.first;
+    Lex.Lex();
+    break;
+  }
+
+  case lltok::LocalVarID: {
+    // Type ::= %4
+    std::pair<Type*, LocTy> &Entry = NumberedTypes[Lex.getUIntVal()];
+
+    // If the type hasn't been defined yet, create a forward definition and
+    // remember where that forward def'n was seen (in case it is never defined).
+    if (!Entry.first) {
+      Entry.first = StructType::create(Context);
+      Entry.second = Lex.getLoc();
+    }
+    Result = Entry.first;
+    Lex.Lex();
+    break;
+  }
+  }
+
+  // Parse the type suffixes.
+  while (true) {
+    switch (Lex.getKind()) {
+    // End of type.
+    default:
+      if (!AllowVoid && Result->isVoidTy())
+        return Error(TypeLoc, "void type only allowed for function results");
+      return false;
+
+    // Type ::= Type '*'
+    case lltok::star:
+      if (Result->isLabelTy())
+        return TokError("basic block pointers are invalid");
+      if (Result->isVoidTy())
+        return TokError("pointers to void are invalid - use i8* instead");
+      if (!PointerType::isValidElementType(Result))
+        return TokError("pointer to this type is invalid");
+      Result = PointerType::getUnqual(Result);
+      Lex.Lex();
+      break;
+
+    // Type ::= Type 'addrspace' '(' uint32 ')' '*'
+    case lltok::kw_addrspace: {
+      if (Result->isLabelTy())
+        return TokError("basic block pointers are invalid");
+      if (Result->isVoidTy())
+        return TokError("pointers to void are invalid; use i8* instead");
+      if (!PointerType::isValidElementType(Result))
+        return TokError("pointer to this type is invalid");
+      unsigned AddrSpace;
+      if (ParseOptionalAddrSpace(AddrSpace) ||
+          ParseToken(lltok::star, "expected '*' in address space"))
+        return true;
+
+      Result = PointerType::get(Result, AddrSpace);
+      break;
+    }
+
+    /// Types '(' ArgTypeListI ')' OptFuncAttrs
+    case lltok::lparen:
+      if (ParseFunctionType(Result))
+        return true;
+      break;
+    }
+  }
+}
+
+/// ParseParameterList
+///    ::= '(' ')'
+///    ::= '(' Arg (',' Arg)* ')'
+///  Arg
+///    ::= Type OptionalAttributes Value OptionalAttributes
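+///  e.g. the "(i32 4, i8* nonnull %p)" portion of a call or invoke.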
+bool LLParser::ParseParameterList(SmallVectorImpl<ParamInfo> &ArgList,
+                                  PerFunctionState &PFS, bool IsMustTailCall,
+                                  bool InVarArgsFunc) {
+  if (ParseToken(lltok::lparen, "expected '(' in call"))
+    return true;
+
+  unsigned AttrIndex = 1;
+  while (Lex.getKind() != lltok::rparen) {
+    // If this isn't the first argument, we need a comma.
+    if (!ArgList.empty() &&
+        ParseToken(lltok::comma, "expected ',' in argument list"))
+      return true;
+
+    // Parse an ellipsis if this is a musttail call in a variadic function.
+    if (Lex.getKind() == lltok::dotdotdot) {
+      const char *Msg = "unexpected ellipsis in argument list for ";
+      if (!IsMustTailCall)
+        return TokError(Twine(Msg) + "non-musttail call");
+      if (!InVarArgsFunc)
+        return TokError(Twine(Msg) + "musttail call in non-varargs function");
+      Lex.Lex();  // Lex the '...', it is purely for readability.
+      return ParseToken(lltok::rparen, "expected ')' at end of argument list");
+    }
+
+    // Parse the argument.
+    LocTy ArgLoc;
+    Type *ArgTy = nullptr;
+    AttrBuilder ArgAttrs;
+    Value *V;
+    if (ParseType(ArgTy, ArgLoc))
+      return true;
+
+    if (ArgTy->isMetadataTy()) {
+      if (ParseMetadataAsValue(V, PFS))
+        return true;
+    } else {
+      // Otherwise, handle normal operands.
+      if (ParseOptionalParamAttrs(ArgAttrs) || ParseValue(ArgTy, V, PFS))
+        return true;
+    }
+    ArgList.push_back(ParamInfo(ArgLoc, V, AttributeSet::get(V->getContext(),
+                                                             AttrIndex++,
+                                                             ArgAttrs)));
+  }
+
+  if (IsMustTailCall && InVarArgsFunc)
+    return TokError("expected '...' at end of argument list for musttail call "
+                    "in varargs function");
+
+  Lex.Lex();  // Lex the ')'.
+  return false;
+}
+
+/// ParseOptionalOperandBundles
+///    ::= /*empty*/
+///    ::= '[' OperandBundle [, OperandBundle ]* ']'
+///
+/// OperandBundle
+///    ::= bundle-tag '(' ')'
+///    ::= bundle-tag '(' Type Value [, Type Value ]* ')'
+///
+/// bundle-tag ::= String Constant
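+/// e.g. the '[ "deopt"(i32 0) ]' suffix of a call instruction.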
+bool LLParser::ParseOptionalOperandBundles(
+    SmallVectorImpl<OperandBundleDef> &BundleList, PerFunctionState &PFS) {
+  LocTy BeginLoc = Lex.getLoc();
+  if (!EatIfPresent(lltok::lsquare))
+    return false;
+
+  while (Lex.getKind() != lltok::rsquare) {
+    // If this isn't the first operand bundle, we need a comma.
+    if (!BundleList.empty() &&
+        ParseToken(lltok::comma, "expected ',' in input list"))
+      return true;
+
+    std::string Tag;
+    if (ParseStringConstant(Tag))
+      return true;
+
+    if (ParseToken(lltok::lparen, "expected '(' in operand bundle"))
+      return true;
+
+    std::vector<Value *> Inputs;
+    while (Lex.getKind() != lltok::rparen) {
+      // If this isn't the first input, we need a comma.
+      if (!Inputs.empty() &&
+          ParseToken(lltok::comma, "expected ',' in input list"))
+        return true;
+
+      Type *Ty = nullptr;
+      Value *Input = nullptr;
+      if (ParseType(Ty) || ParseValue(Ty, Input, PFS))
+        return true;
+      Inputs.push_back(Input);
+    }
+
+    BundleList.emplace_back(std::move(Tag), std::move(Inputs));
+
+    Lex.Lex(); // Lex the ')'.
+  }
+
+  if (BundleList.empty())
+    return Error(BeginLoc, "operand bundle set must not be empty");
+
+  Lex.Lex(); // Lex the ']'.
+  return false;
+}
+
+/// ParseArgumentList - Parse the argument list for a function type or function
+/// prototype.
+///   ::= '(' ArgTypeListI ')'
+/// ArgTypeListI
+///   ::= /*empty*/
+///   ::= '...'
+///   ::= ArgTypeList ',' '...'
+///   ::= ArgType (',' ArgType)*
+///
+bool LLParser::ParseArgumentList(SmallVectorImpl<ArgInfo> &ArgList,
+                                 bool &isVarArg){
+  isVarArg = false;
+  assert(Lex.getKind() == lltok::lparen);
+  Lex.Lex(); // eat the (.
+
+  if (Lex.getKind() == lltok::rparen) {
+    // empty
+  } else if (Lex.getKind() == lltok::dotdotdot) {
+    isVarArg = true;
+    Lex.Lex();
+  } else {
+    LocTy TypeLoc = Lex.getLoc();
+    Type *ArgTy = nullptr;
+    AttrBuilder Attrs;
+    std::string Name;
+
+    if (ParseType(ArgTy) ||
+        ParseOptionalParamAttrs(Attrs)) return true;
+
+    if (ArgTy->isVoidTy())
+      return Error(TypeLoc, "argument can not have void type");
+
+    if (Lex.getKind() == lltok::LocalVar) {
+      Name = Lex.getStrVal();
+      Lex.Lex();
+    }
+
+    if (!FunctionType::isValidArgumentType(ArgTy))
+      return Error(TypeLoc, "invalid type for function argument");
+
+    unsigned AttrIndex = 1;
+    ArgList.emplace_back(TypeLoc, ArgTy, AttributeSet::get(ArgTy->getContext(),
+                                                           AttrIndex++, Attrs),
+                         std::move(Name));
+
+    while (EatIfPresent(lltok::comma)) {
+      // Handle ... at end of arg list.
+      if (EatIfPresent(lltok::dotdotdot)) {
+        isVarArg = true;
+        break;
+      }
+
+      // Otherwise must be an argument type.
+      TypeLoc = Lex.getLoc();
+      if (ParseType(ArgTy) || ParseOptionalParamAttrs(Attrs)) return true;
+
+      if (ArgTy->isVoidTy())
+        return Error(TypeLoc, "argument can not have void type");
+
+      if (Lex.getKind() == lltok::LocalVar) {
+        Name = Lex.getStrVal();
+        Lex.Lex();
+      } else {
+        Name = "";
+      }
+
+      if (!ArgTy->isFirstClassType())
+        return Error(TypeLoc, "invalid type for function argument");
+
+      ArgList.emplace_back(
+          TypeLoc, ArgTy,
+          AttributeSet::get(ArgTy->getContext(), AttrIndex++, Attrs),
+          std::move(Name));
+    }
+  }
+
+  return ParseToken(lltok::rparen, "expected ')' at end of argument list");
+}
+
+/// ParseFunctionType
+///  ::= Type ArgumentList OptionalAttrs
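+///  e.g. "i32 (i8*, ...)" or "void ()".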
+bool LLParser::ParseFunctionType(Type *&Result) {
+  assert(Lex.getKind() == lltok::lparen);
+
+  if (!FunctionType::isValidReturnType(Result))
+    return TokError("invalid function return type");
+
+  SmallVector<ArgInfo, 8> ArgList;
+  bool isVarArg;
+  if (ParseArgumentList(ArgList, isVarArg))
+    return true;
+
+  // Reject names on the argument list.
+  for (unsigned i = 0, e = ArgList.size(); i != e; ++i) {
+    if (!ArgList[i].Name.empty())
+      return Error(ArgList[i].Loc, "argument name invalid in function type");
+    if (ArgList[i].Attrs.hasAttributes(i + 1))
+      return Error(ArgList[i].Loc,
+                   "argument attributes invalid in function type");
+  }
+
+  SmallVector<Type*, 16> ArgListTy;
+  for (unsigned i = 0, e = ArgList.size(); i != e; ++i)
+    ArgListTy.push_back(ArgList[i].Ty);
+
+  Result = FunctionType::get(Result, ArgListTy, isVarArg);
+  return false;
+}
+
+/// ParseAnonStructType - Parse an anonymous struct type, which is inlined into
+/// other structs.
+bool LLParser::ParseAnonStructType(Type *&Result, bool Packed) {
+  SmallVector<Type*, 8> Elts;
+  if (ParseStructBody(Elts)) return true;
+
+  Result = StructType::get(Context, Elts, Packed);
+  return false;
+}
+
+/// ParseStructDefinition - Parse a struct in a 'type' definition.
+bool LLParser::ParseStructDefinition(SMLoc TypeLoc, StringRef Name,
+                                     std::pair<Type*, LocTy> &Entry,
+                                     Type *&ResultTy) {
+  // If the type was already defined, diagnose the redefinition.
+  if (Entry.first && !Entry.second.isValid())
+    return Error(TypeLoc, "redefinition of type");
+
+  // If we have opaque, just return without filling in the definition for the
+  // struct.  This counts as a definition as far as the .ll file goes.
+  if (EatIfPresent(lltok::kw_opaque)) {
+    // This type is being defined, so clear the location to indicate this.
+    Entry.second = SMLoc();
+
+    // If this type number has never been uttered, create it.
+    if (!Entry.first)
+      Entry.first = StructType::create(Context, Name);
+    ResultTy = Entry.first;
+    return false;
+  }
+
+  // If the type starts with '<', then it is either a packed struct or a vector.
+  bool isPacked = EatIfPresent(lltok::less);
+
+  // If we don't have a struct, then we have a random type alias, which we
+  // accept for compatibility with old files.  These types are not allowed to be
+  // forward referenced and not allowed to be recursive.
+  if (Lex.getKind() != lltok::lbrace) {
+    if (Entry.first)
+      return Error(TypeLoc, "forward references to non-struct type");
+
+    ResultTy = nullptr;
+    if (isPacked)
+      return ParseArrayVectorType(ResultTy, true);
+    return ParseType(ResultTy);
+  }
+
+  // This type is being defined, so clear the location to indicate this.
+  Entry.second = SMLoc();
+
+  // If this type number has never been uttered, create it.
+  if (!Entry.first)
+    Entry.first = StructType::create(Context, Name);
+
+  StructType *STy = cast<StructType>(Entry.first);
+
+  SmallVector<Type*, 8> Body;
+  if (ParseStructBody(Body) ||
+      (isPacked && ParseToken(lltok::greater, "expected '>' in packed struct")))
+    return true;
+
+  STy->setBody(Body, isPacked);
+  ResultTy = STy;
+  return false;
+}
+
+/// ParseStructType: Handles packed and unpacked types.  </> parsed elsewhere.
+///   StructType
+///     ::= '{' '}'
+///     ::= '{' Type (',' Type)* '}'
+///     ::= '<' '{' '}' '>'
+///     ::= '<' '{' Type (',' Type)* '}' '>'
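+///     e.g. "{ i32, float }" or the packed form "<{ i8, i32 }>".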
+bool LLParser::ParseStructBody(SmallVectorImpl<Type*> &Body) {
+  assert(Lex.getKind() == lltok::lbrace);
+  Lex.Lex(); // Consume the '{'
+
+  // Handle the empty struct.
+  if (EatIfPresent(lltok::rbrace))
+    return false;
+
+  LocTy EltTyLoc = Lex.getLoc();
+  Type *Ty = nullptr;
+  if (ParseType(Ty)) return true;
+  Body.push_back(Ty);
+
+  if (!StructType::isValidElementType(Ty))
+    return Error(EltTyLoc, "invalid element type for struct");
+
+  while (EatIfPresent(lltok::comma)) {
+    EltTyLoc = Lex.getLoc();
+    if (ParseType(Ty)) return true;
+
+    if (!StructType::isValidElementType(Ty))
+      return Error(EltTyLoc, "invalid element type for struct");
+
+    Body.push_back(Ty);
+  }
+
+  return ParseToken(lltok::rbrace, "expected '}' at end of struct");
+}
+
+/// ParseArrayVectorType - Parse an array or vector type, assuming the first
+/// token has already been consumed.
+///   Type
+///     ::= '[' APSINTVAL 'x' Types ']'
+///     ::= '<' APSINTVAL 'x' Types '>'
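+///     e.g. "[4 x i32]" or "<8 x float>".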
+bool LLParser::ParseArrayVectorType(Type *&Result, bool isVector) {
+  if (Lex.getKind() != lltok::APSInt || Lex.getAPSIntVal().isSigned() ||
+      Lex.getAPSIntVal().getBitWidth() > 64)
+    return TokError("expected number in address space");
+
+  LocTy SizeLoc = Lex.getLoc();
+  uint64_t Size = Lex.getAPSIntVal().getZExtValue();
+  Lex.Lex();
+
+  if (ParseToken(lltok::kw_x, "expected 'x' after element count"))
+      return true;
+
+  LocTy TypeLoc = Lex.getLoc();
+  Type *EltTy = nullptr;
+  if (ParseType(EltTy)) return true;
+
+  if (ParseToken(isVector ? lltok::greater : lltok::rsquare,
+                 "expected end of sequential type"))
+    return true;
+
+  if (isVector) {
+    if (Size == 0)
+      return Error(SizeLoc, "zero element vector is illegal");
+    if ((unsigned)Size != Size)
+      return Error(SizeLoc, "size too large for vector");
+    if (!VectorType::isValidElementType(EltTy))
+      return Error(TypeLoc, "invalid vector element type");
+    Result = VectorType::get(EltTy, unsigned(Size));
+  } else {
+    if (!ArrayType::isValidElementType(EltTy))
+      return Error(TypeLoc, "invalid array element type");
+    Result = ArrayType::get(EltTy, Size);
+  }
+  return false;
+}
+
+//===----------------------------------------------------------------------===//
+// Function Semantic Analysis.
+//===----------------------------------------------------------------------===//
+
+LLParser::PerFunctionState::PerFunctionState(LLParser &p, Function &f,
+                                             int functionNumber)
+  : P(p), F(f), FunctionNumber(functionNumber) {
+
+  // Insert unnamed arguments into the NumberedVals list.
+  for (Argument &A : F.args())
+    if (!A.hasName())
+      NumberedVals.push_back(&A);
+}
+
+LLParser::PerFunctionState::~PerFunctionState() {
+  // If there were any forward referenced non-basicblock values, delete them.
+
+  for (const auto &P : ForwardRefVals) {
+    if (isa<BasicBlock>(P.second.first))
+      continue;
+    P.second.first->replaceAllUsesWith(
+        UndefValue::get(P.second.first->getType()));
+    delete P.second.first;
+  }
+
+  for (const auto &P : ForwardRefValIDs) {
+    if (isa<BasicBlock>(P.second.first))
+      continue;
+    P.second.first->replaceAllUsesWith(
+        UndefValue::get(P.second.first->getType()));
+    delete P.second.first;
+  }
+}
+
+bool LLParser::PerFunctionState::FinishFunction() {
+  if (!ForwardRefVals.empty())
+    return P.Error(ForwardRefVals.begin()->second.second,
+                   "use of undefined value '%" + ForwardRefVals.begin()->first +
+                   "'");
+  if (!ForwardRefValIDs.empty())
+    return P.Error(ForwardRefValIDs.begin()->second.second,
+                   "use of undefined value '%" +
+                   Twine(ForwardRefValIDs.begin()->first) + "'");
+  return false;
+}
+
+/// GetVal - Get a value with the specified name or ID, creating a
+/// forward reference record if needed.  This can return null if the value
+/// exists but does not have the right type.
+Value *LLParser::PerFunctionState::GetVal(const std::string &Name, Type *Ty,
+                                          LocTy Loc) {
+  // Look this name up in the normal function symbol table.
+  Value *Val = F.getValueSymbolTable()->lookup(Name);
+
+  // If this is a forward reference for the value, see if we already created a
+  // forward ref record.
+  if (!Val) {
+    auto I = ForwardRefVals.find(Name);
+    if (I != ForwardRefVals.end())
+      Val = I->second.first;
+  }
+
+  // If we have the value in the symbol table or fwd-ref table, return it.
+  if (Val) {
+    if (Val->getType() == Ty) return Val;
+    if (Ty->isLabelTy())
+      P.Error(Loc, "'%" + Name + "' is not a basic block");
+    else
+      P.Error(Loc, "'%" + Name + "' defined with type '" +
+              getTypeString(Val->getType()) + "'");
+    return nullptr;
+  }
+
+  // Don't make placeholders with invalid type.
+  if (!Ty->isFirstClassType()) {
+    P.Error(Loc, "invalid use of a non-first-class type");
+    return nullptr;
+  }
+
+  // Otherwise, create a new forward reference for this value and remember it.
+  Value *FwdVal;
+  if (Ty->isLabelTy()) {
+    FwdVal = BasicBlock::Create(F.getContext(), Name, &F);
+  } else {
+    FwdVal = new Argument(Ty, Name);
+  }
+
+  ForwardRefVals[Name] = std::make_pair(FwdVal, Loc);
+  return FwdVal;
+}
+
+Value *LLParser::PerFunctionState::GetVal(unsigned ID, Type *Ty, LocTy Loc) {
+  // Look this name up in the normal function symbol table.
+  Value *Val = ID < NumberedVals.size() ? NumberedVals[ID] : nullptr;
+
+  // If this is a forward reference for the value, see if we already created a
+  // forward ref record.
+  if (!Val) {
+    auto I = ForwardRefValIDs.find(ID);
+    if (I != ForwardRefValIDs.end())
+      Val = I->second.first;
+  }
+
+  // If we have the value in the symbol table or fwd-ref table, return it.
+  if (Val) {
+    if (Val->getType() == Ty) return Val;
+    if (Ty->isLabelTy())
+      P.Error(Loc, "'%" + Twine(ID) + "' is not a basic block");
+    else
+      P.Error(Loc, "'%" + Twine(ID) + "' defined with type '" +
+              getTypeString(Val->getType()) + "'");
+    return nullptr;
+  }
+
+  if (!Ty->isFirstClassType()) {
+    P.Error(Loc, "invalid use of a non-first-class type");
+    return nullptr;
+  }
+
+  // Otherwise, create a new forward reference for this value and remember it.
+  Value *FwdVal;
+  if (Ty->isLabelTy()) {
+    FwdVal = BasicBlock::Create(F.getContext(), "", &F);
+  } else {
+    FwdVal = new Argument(Ty);
+  }
+
+  ForwardRefValIDs[ID] = std::make_pair(FwdVal, Loc);
+  return FwdVal;
+}
+
+/// SetInstName - After an instruction is parsed and inserted into its
+/// basic block, this installs its name.
+bool LLParser::PerFunctionState::SetInstName(int NameID,
+                                             const std::string &NameStr,
+                                             LocTy NameLoc, Instruction *Inst) {
+  // If this instruction has void type, it cannot have a name or ID specified.
+  if (Inst->getType()->isVoidTy()) {
+    if (NameID != -1 || !NameStr.empty())
+      return P.Error(NameLoc, "instructions returning void cannot have a name");
+    return false;
+  }
+
+  // If this was a numbered instruction, verify that the instruction is the
+  // expected value and resolve any forward references.
+  if (NameStr.empty()) {
+    // If neither a name nor an ID was specified, just use the next ID.
+    if (NameID == -1)
+      NameID = NumberedVals.size();
+
+    if (unsigned(NameID) != NumberedVals.size())
+      return P.Error(NameLoc, "instruction expected to be numbered '%" +
+                     Twine(NumberedVals.size()) + "'");
+
+    auto FI = ForwardRefValIDs.find(NameID);
+    if (FI != ForwardRefValIDs.end()) {
+      Value *Sentinel = FI->second.first;
+      if (Sentinel->getType() != Inst->getType())
+        return P.Error(NameLoc, "instruction forward referenced with type '" +
+                       getTypeString(FI->second.first->getType()) + "'");
+
+      Sentinel->replaceAllUsesWith(Inst);
+      delete Sentinel;
+      ForwardRefValIDs.erase(FI);
+    }
+
+    NumberedVals.push_back(Inst);
+    return false;
+  }
+
+  // Otherwise, the instruction had a name.  Resolve forward refs and set it.
+  auto FI = ForwardRefVals.find(NameStr);
+  if (FI != ForwardRefVals.end()) {
+    Value *Sentinel = FI->second.first;
+    if (Sentinel->getType() != Inst->getType())
+      return P.Error(NameLoc, "instruction forward referenced with type '" +
+                     getTypeString(FI->second.first->getType()) + "'");
+
+    Sentinel->replaceAllUsesWith(Inst);
+    delete Sentinel;
+    ForwardRefVals.erase(FI);
+  }
+
+  // Set the name on the instruction.
+  Inst->setName(NameStr);
+
+  if (Inst->getName() != NameStr)
+    return P.Error(NameLoc, "multiple definition of local value named '" +
+                   NameStr + "'");
+  return false;
+}
+
+/// GetBB - Get a basic block with the specified name or ID, creating a
+/// forward reference record if needed.
+BasicBlock *LLParser::PerFunctionState::GetBB(const std::string &Name,
+                                              LocTy Loc) {
+  return dyn_cast_or_null<BasicBlock>(GetVal(Name,
+                                      Type::getLabelTy(F.getContext()), Loc));
+}
+
+BasicBlock *LLParser::PerFunctionState::GetBB(unsigned ID, LocTy Loc) {
+  return dyn_cast_or_null<BasicBlock>(GetVal(ID,
+                                      Type::getLabelTy(F.getContext()), Loc));
+}
+
+/// DefineBB - Define the specified basic block, which is either named or
+/// unnamed.  If there is an error, this returns null; otherwise it returns
+/// the block being defined.
+BasicBlock *LLParser::PerFunctionState::DefineBB(const std::string &Name,
+                                                 LocTy Loc) {
+  BasicBlock *BB;
+  if (Name.empty())
+    BB = GetBB(NumberedVals.size(), Loc);
+  else
+    BB = GetBB(Name, Loc);
+  if (!BB) return nullptr; // Already diagnosed error.
+
+  // Move the block to the end of the function.  Forward ref'd blocks are
+  // inserted wherever they happen to be referenced.
+  F.getBasicBlockList().splice(F.end(), F.getBasicBlockList(), BB);
+
+  // Remove the block from forward ref sets.
+  if (Name.empty()) {
+    ForwardRefValIDs.erase(NumberedVals.size());
+    NumberedVals.push_back(BB);
+  } else {
+    // BB forward references are already in the function symbol table.
+    ForwardRefVals.erase(Name);
+  }
+
+  return BB;
+}
+
+//===----------------------------------------------------------------------===//
+// Constants.
+//===----------------------------------------------------------------------===//
+
+/// ParseValID - Parse an abstract value that doesn't necessarily have a
+/// type implied.  For example, if we parse "4" we don't know what integer type
+/// it has.  The value will later be combined with its type and checked for
+/// sanity.  PFS is used to convert function-local operands of metadata (since
+/// metadata operands are not just parsed here but also converted to values).
+/// PFS can be null when we are not parsing metadata values inside a function.
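+/// e.g. "42", "@foo", "%tmp", "zeroinitializer", or a constant expression.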
+bool LLParser::ParseValID(ValID &ID, PerFunctionState *PFS) {
+  ID.Loc = Lex.getLoc();
+  switch (Lex.getKind()) {
+  default: return TokError("expected value token");
+  case lltok::GlobalID:  // @42
+    ID.UIntVal = Lex.getUIntVal();
+    ID.Kind = ValID::t_GlobalID;
+    break;
+  case lltok::GlobalVar:  // @foo
+    ID.StrVal = Lex.getStrVal();
+    ID.Kind = ValID::t_GlobalName;
+    break;
+  case lltok::LocalVarID:  // %42
+    ID.UIntVal = Lex.getUIntVal();
+    ID.Kind = ValID::t_LocalID;
+    break;
+  case lltok::LocalVar:  // %foo
+    ID.StrVal = Lex.getStrVal();
+    ID.Kind = ValID::t_LocalName;
+    break;
+  case lltok::APSInt:
+    ID.APSIntVal = Lex.getAPSIntVal();
+    ID.Kind = ValID::t_APSInt;
+    break;
+  case lltok::APFloat:
+    ID.APFloatVal = Lex.getAPFloatVal();
+    ID.Kind = ValID::t_APFloat;
+    break;
+  case lltok::kw_true:
+    ID.ConstantVal = ConstantInt::getTrue(Context);
+    ID.Kind = ValID::t_Constant;
+    break;
+  case lltok::kw_false:
+    ID.ConstantVal = ConstantInt::getFalse(Context);
+    ID.Kind = ValID::t_Constant;
+    break;
+  case lltok::kw_null: ID.Kind = ValID::t_Null; break;
+  case lltok::kw_undef: ID.Kind = ValID::t_Undef; break;
+  case lltok::kw_zeroinitializer: ID.Kind = ValID::t_Zero; break;
+  case lltok::kw_none: ID.Kind = ValID::t_None; break;
+
+  case lltok::lbrace: {
+    // ValID ::= '{' ConstVector '}'
+    Lex.Lex();
+    SmallVector<Constant*, 16> Elts;
+    if (ParseGlobalValueVector(Elts) ||
+        ParseToken(lltok::rbrace, "expected end of struct constant"))
+      return true;
+
+    ID.ConstantStructElts = make_unique<Constant *[]>(Elts.size());
+    ID.UIntVal = Elts.size();
+    memcpy(ID.ConstantStructElts.get(), Elts.data(),
+           Elts.size() * sizeof(Elts[0]));
+    ID.Kind = ValID::t_ConstantStruct;
+    return false;
+  }
+  case lltok::less: {
+    // ValID ::= '<' ConstVector '>'         --> Vector.
+    // ValID ::= '<' '{' ConstVector '}' '>' --> Packed Struct.
+    Lex.Lex();
+    bool isPackedStruct = EatIfPresent(lltok::lbrace);
+
+    SmallVector<Constant*, 16> Elts;
+    LocTy FirstEltLoc = Lex.getLoc();
+    if (ParseGlobalValueVector(Elts) ||
+        (isPackedStruct &&
+         ParseToken(lltok::rbrace, "expected end of packed struct")) ||
+        ParseToken(lltok::greater, "expected end of constant"))
+      return true;
+
+    if (isPackedStruct) {
+      ID.ConstantStructElts = make_unique<Constant *[]>(Elts.size());
+      memcpy(ID.ConstantStructElts.get(), Elts.data(),
+             Elts.size() * sizeof(Elts[0]));
+      ID.UIntVal = Elts.size();
+      ID.Kind = ValID::t_PackedConstantStruct;
+      return false;
+    }
+
+    if (Elts.empty())
+      return Error(ID.Loc, "constant vector must not be empty");
+
+    if (!Elts[0]->getType()->isIntegerTy() &&
+        !Elts[0]->getType()->isFloatingPointTy() &&
+        !Elts[0]->getType()->isPointerTy())
+      return Error(FirstEltLoc,
+            "vector elements must have integer, pointer or floating point type");
+
+    // Verify that all the vector elements have the same type.
+    for (unsigned i = 1, e = Elts.size(); i != e; ++i)
+      if (Elts[i]->getType() != Elts[0]->getType())
+        return Error(FirstEltLoc, "vector element #" + Twine(i) +
+                     " is not of type '" +
+                     getTypeString(Elts[0]->getType()) + "'");
+
+    ID.ConstantVal = ConstantVector::get(Elts);
+    ID.Kind = ValID::t_Constant;
+    return false;
+  }
+  case lltok::lsquare: {   // Array Constant
+    Lex.Lex();
+    SmallVector<Constant*, 16> Elts;
+    LocTy FirstEltLoc = Lex.getLoc();
+    if (ParseGlobalValueVector(Elts) ||
+        ParseToken(lltok::rsquare, "expected end of array constant"))
+      return true;
+
+    // Handle empty element.
+    if (Elts.empty()) {
+      // Use undef instead of an array because it's inconvenient to determine
+      // the element type at this point, there being no elements to examine.
+      ID.Kind = ValID::t_EmptyArray;
+      return false;
+    }
+
+    if (!Elts[0]->getType()->isFirstClassType())
+      return Error(FirstEltLoc, "invalid array element type: " +
+                   getTypeString(Elts[0]->getType()));
+
+    ArrayType *ATy = ArrayType::get(Elts[0]->getType(), Elts.size());
+
+    // Verify all elements are correct type!
+    for (unsigned i = 0, e = Elts.size(); i != e; ++i) {
+      if (Elts[i]->getType() != Elts[0]->getType())
+        return Error(FirstEltLoc, "array element #" + Twine(i) +
+                     " is not of type '" +
+                     getTypeString(Elts[0]->getType()) + "'");
+    }
+
+    ID.ConstantVal = ConstantArray::get(ATy, Elts);
+    ID.Kind = ValID::t_Constant;
+    return false;
+  }
+  case lltok::kw_c:  // c "foo"
+    Lex.Lex();
+    ID.ConstantVal = ConstantDataArray::getString(Context, Lex.getStrVal(),
+                                                  false);
+    if (ParseToken(lltok::StringConstant, "expected string")) return true;
+    ID.Kind = ValID::t_Constant;
+    return false;
+
+  case lltok::kw_asm: {
+    // ValID ::= 'asm' SideEffect? AlignStack? IntelDialect? STRINGCONSTANT ','
+    //             STRINGCONSTANT
+    bool HasSideEffect, AlignStack, AsmDialect;
+    Lex.Lex();
+    if (ParseOptionalToken(lltok::kw_sideeffect, HasSideEffect) ||
+        ParseOptionalToken(lltok::kw_alignstack, AlignStack) ||
+        ParseOptionalToken(lltok::kw_inteldialect, AsmDialect) ||
+        ParseStringConstant(ID.StrVal) ||
+        ParseToken(lltok::comma, "expected comma in inline asm expression") ||
+        ParseToken(lltok::StringConstant, "expected constraint string"))
+      return true;
+    ID.StrVal2 = Lex.getStrVal();
+    ID.UIntVal = unsigned(HasSideEffect) | (unsigned(AlignStack)<<1) |
+      (unsigned(AsmDialect)<<2);
+    ID.Kind = ValID::t_InlineAsm;
+    return false;
+  }
+
+  case lltok::kw_blockaddress: {
+    // ValID ::= 'blockaddress' '(' @foo ',' %bar ')'
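+    //   e.g. (illustrative): @gotoTarget = global i8* blockaddress(@f, %bb)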
+    Lex.Lex();
+
+    ValID Fn, Label;
+
+    if (ParseToken(lltok::lparen, "expected '(' in block address expression") ||
+        ParseValID(Fn) ||
+        ParseToken(lltok::comma, "expected comma in block address expression")||
+        ParseValID(Label) ||
+        ParseToken(lltok::rparen, "expected ')' in block address expression"))
+      return true;
+
+    if (Fn.Kind != ValID::t_GlobalID && Fn.Kind != ValID::t_GlobalName)
+      return Error(Fn.Loc, "expected function name in blockaddress");
+    if (Label.Kind != ValID::t_LocalID && Label.Kind != ValID::t_LocalName)
+      return Error(Label.Loc, "expected basic block name in blockaddress");
+
+    // Try to find the function (but skip it if it's forward-referenced).
+    GlobalValue *GV = nullptr;
+    if (Fn.Kind == ValID::t_GlobalID) {
+      if (Fn.UIntVal < NumberedVals.size())
+        GV = NumberedVals[Fn.UIntVal];
+    } else if (!ForwardRefVals.count(Fn.StrVal)) {
+      GV = M->getNamedValue(Fn.StrVal);
+    }
+    Function *F = nullptr;
+    if (GV) {
+      // Confirm that it's actually a function with a definition.
+      if (!isa<Function>(GV))
+        return Error(Fn.Loc, "expected function name in blockaddress");
+      F = cast<Function>(GV);
+      if (F->isDeclaration())
+        return Error(Fn.Loc, "cannot take blockaddress inside a declaration");
+    }
+
+    if (!F) {
+      // Make a global variable as a placeholder for this reference.
+      GlobalValue *&FwdRef =
+          ForwardRefBlockAddresses.insert(std::make_pair(
+                                              std::move(Fn),
+                                              std::map<ValID, GlobalValue *>()))
+              .first->second.insert(std::make_pair(std::move(Label), nullptr))
+              .first->second;
+      if (!FwdRef)
+        FwdRef = new GlobalVariable(*M, Type::getInt8Ty(Context), false,
+                                    GlobalValue::InternalLinkage, nullptr, "");
+      ID.ConstantVal = FwdRef;
+      ID.Kind = ValID::t_Constant;
+      return false;
+    }
+
+    // We found the function; now find the basic block.  Don't use PFS, since we
+    // might be inside a constant expression.
+    BasicBlock *BB;
+    if (BlockAddressPFS && F == &BlockAddressPFS->getFunction()) {
+      if (Label.Kind == ValID::t_LocalID)
+        BB = BlockAddressPFS->GetBB(Label.UIntVal, Label.Loc);
+      else
+        BB = BlockAddressPFS->GetBB(Label.StrVal, Label.Loc);
+      if (!BB)
+        return Error(Label.Loc, "referenced value is not a basic block");
+    } else {
+      if (Label.Kind == ValID::t_LocalID)
+        return Error(Label.Loc, "cannot take address of numeric label after "
+                                "the function is defined");
+      BB = dyn_cast_or_null<BasicBlock>(
+          F->getValueSymbolTable()->lookup(Label.StrVal));
+      if (!BB)
+        return Error(Label.Loc, "referenced value is not a basic block");
+    }
+
+    ID.ConstantVal = BlockAddress::get(F, BB);
+    ID.Kind = ValID::t_Constant;
+    return false;
+  }
+
+  case lltok::kw_trunc:
+  case lltok::kw_zext:
+  case lltok::kw_sext:
+  case lltok::kw_fptrunc:
+  case lltok::kw_fpext:
+  case lltok::kw_bitcast:
+  case lltok::kw_addrspacecast:
+  case lltok::kw_uitofp:
+  case lltok::kw_sitofp:
+  case lltok::kw_fptoui:
+  case lltok::kw_fptosi:
+  case lltok::kw_inttoptr:
+  case lltok::kw_ptrtoint: {
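+    // Constant cast expression, e.g. (illustrative): bitcast (i32* @g to i8*)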
+    unsigned Opc = Lex.getUIntVal();
+    Type *DestTy = nullptr;
+    Constant *SrcVal;
+    Lex.Lex();
+    if (ParseToken(lltok::lparen, "expected '(' after constantexpr cast") ||
+        ParseGlobalTypeAndValue(SrcVal) ||
+        ParseToken(lltok::kw_to, "expected 'to' in constantexpr cast") ||
+        ParseType(DestTy) ||
+        ParseToken(lltok::rparen, "expected ')' at end of constantexpr cast"))
+      return true;
+    if (!CastInst::castIsValid((Instruction::CastOps)Opc, SrcVal, DestTy))
+      return Error(ID.Loc, "invalid cast opcode for cast from '" +
+                   getTypeString(SrcVal->getType()) + "' to '" +
+                   getTypeString(DestTy) + "'");
+    ID.ConstantVal = ConstantExpr::getCast((Instruction::CastOps)Opc,
+                                                 SrcVal, DestTy);
+    ID.Kind = ValID::t_Constant;
+    return false;
+  }
+  case lltok::kw_extractvalue: {
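+    // e.g. (illustrative): extractvalue ({ i32, i32 } { i32 1, i32 2 }, 0)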
+    Lex.Lex();
+    Constant *Val;
+    SmallVector<unsigned, 4> Indices;
+    if (ParseToken(lltok::lparen, "expected '(' in extractvalue constantexpr")||
+        ParseGlobalTypeAndValue(Val) ||
+        ParseIndexList(Indices) ||
+        ParseToken(lltok::rparen, "expected ')' in extractvalue constantexpr"))
+      return true;
+
+    if (!Val->getType()->isAggregateType())
+      return Error(ID.Loc, "extractvalue operand must be aggregate type");
+    if (!ExtractValueInst::getIndexedType(Val->getType(), Indices))
+      return Error(ID.Loc, "invalid indices for extractvalue");
+    ID.ConstantVal = ConstantExpr::getExtractValue(Val, Indices);
+    ID.Kind = ValID::t_Constant;
+    return false;
+  }
+  case lltok::kw_insertvalue: {
+    Lex.Lex();
+    Constant *Val0, *Val1;
+    SmallVector<unsigned, 4> Indices;
+    if (ParseToken(lltok::lparen, "expected '(' in insertvalue constantexpr")||
+        ParseGlobalTypeAndValue(Val0) ||
+        ParseToken(lltok::comma, "expected comma in insertvalue constantexpr")||
+        ParseGlobalTypeAndValue(Val1) ||
+        ParseIndexList(Indices) ||
+        ParseToken(lltok::rparen, "expected ')' in insertvalue constantexpr"))
+      return true;
+    if (!Val0->getType()->isAggregateType())
+      return Error(ID.Loc, "insertvalue operand must be aggregate type");
+    Type *IndexedType =
+        ExtractValueInst::getIndexedType(Val0->getType(), Indices);
+    if (!IndexedType)
+      return Error(ID.Loc, "invalid indices for insertvalue");
+    if (IndexedType != Val1->getType())
+      return Error(ID.Loc, "insertvalue operand and field disagree in type: '" +
+                               getTypeString(Val1->getType()) +
+                               "' instead of '" + getTypeString(IndexedType) +
+                               "'");
+    ID.ConstantVal = ConstantExpr::getInsertValue(Val0, Val1, Indices);
+    ID.Kind = ValID::t_Constant;
+    return false;
+  }
+  case lltok::kw_icmp:
+  case lltok::kw_fcmp: {
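+    // Constant comparison, e.g. (illustrative): icmp eq (i8* @a, i8* @b)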
+    unsigned PredVal, Opc = Lex.getUIntVal();
+    Constant *Val0, *Val1;
+    Lex.Lex();
+    if (ParseCmpPredicate(PredVal, Opc) ||
+        ParseToken(lltok::lparen, "expected '(' in compare constantexpr") ||
+        ParseGlobalTypeAndValue(Val0) ||
+        ParseToken(lltok::comma, "expected comma in compare constantexpr") ||
+        ParseGlobalTypeAndValue(Val1) ||
+        ParseToken(lltok::rparen, "expected ')' in compare constantexpr"))
+      return true;
+
+    if (Val0->getType() != Val1->getType())
+      return Error(ID.Loc, "compare operands must have the same type");
+
+    CmpInst::Predicate Pred = (CmpInst::Predicate)PredVal;
+
+    if (Opc == Instruction::FCmp) {
+      if (!Val0->getType()->isFPOrFPVectorTy())
+        return Error(ID.Loc, "fcmp requires floating point operands");
+      ID.ConstantVal = ConstantExpr::getFCmp(Pred, Val0, Val1);
+    } else {
+      assert(Opc == Instruction::ICmp && "Unexpected opcode for CmpInst!");
+      if (!Val0->getType()->isIntOrIntVectorTy() &&
+          !Val0->getType()->getScalarType()->isPointerTy())
+        return Error(ID.Loc, "icmp requires pointer or integer operands");
+      ID.ConstantVal = ConstantExpr::getICmp(Pred, Val0, Val1);
+    }
+    ID.Kind = ValID::t_Constant;
+    return false;
+  }
+
+  // Binary Operators.
+  case lltok::kw_add:
+  case lltok::kw_fadd:
+  case lltok::kw_sub:
+  case lltok::kw_fsub:
+  case lltok::kw_mul:
+  case lltok::kw_fmul:
+  case lltok::kw_udiv:
+  case lltok::kw_sdiv:
+  case lltok::kw_fdiv:
+  case lltok::kw_urem:
+  case lltok::kw_srem:
+  case lltok::kw_frem:
+  case lltok::kw_shl:
+  case lltok::kw_lshr:
+  case lltok::kw_ashr: {
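+    // e.g. (illustrative):
+    //   sub (i64 ptrtoint (i32* @a to i64), i64 ptrtoint (i32* @b to i64))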
+    bool NUW = false;
+    bool NSW = false;
+    bool Exact = false;
+    unsigned Opc = Lex.getUIntVal();
+    Constant *Val0, *Val1;
+    Lex.Lex();
+    LocTy ModifierLoc = Lex.getLoc();
+    if (Opc == Instruction::Add || Opc == Instruction::Sub ||
+        Opc == Instruction::Mul || Opc == Instruction::Shl) {
+      if (EatIfPresent(lltok::kw_nuw))
+        NUW = true;
+      if (EatIfPresent(lltok::kw_nsw)) {
+        NSW = true;
+        if (EatIfPresent(lltok::kw_nuw))
+          NUW = true;
+      }
+    } else if (Opc == Instruction::SDiv || Opc == Instruction::UDiv ||
+               Opc == Instruction::LShr || Opc == Instruction::AShr) {
+      if (EatIfPresent(lltok::kw_exact))
+        Exact = true;
+    }
+    if (ParseToken(lltok::lparen, "expected '(' in binary constantexpr") ||
+        ParseGlobalTypeAndValue(Val0) ||
+        ParseToken(lltok::comma, "expected comma in binary constantexpr") ||
+        ParseGlobalTypeAndValue(Val1) ||
+        ParseToken(lltok::rparen, "expected ')' in binary constantexpr"))
+      return true;
+    if (Val0->getType() != Val1->getType())
+      return Error(ID.Loc, "operands of constexpr must have same type");
+    if (!Val0->getType()->isIntOrIntVectorTy()) {
+      if (NUW)
+        return Error(ModifierLoc, "nuw only applies to integer operations");
+      if (NSW)
+        return Error(ModifierLoc, "nsw only applies to integer operations");
+    }
+    // Check that the type is valid for the operator.
+    switch (Opc) {
+    case Instruction::Add:
+    case Instruction::Sub:
+    case Instruction::Mul:
+    case Instruction::UDiv:
+    case Instruction::SDiv:
+    case Instruction::URem:
+    case Instruction::SRem:
+    case Instruction::Shl:
+    case Instruction::AShr:
+    case Instruction::LShr:
+      if (!Val0->getType()->isIntOrIntVectorTy())
+        return Error(ID.Loc, "constexpr requires integer operands");
+      break;
+    case Instruction::FAdd:
+    case Instruction::FSub:
+    case Instruction::FMul:
+    case Instruction::FDiv:
+    case Instruction::FRem:
+      if (!Val0->getType()->isFPOrFPVectorTy())
+        return Error(ID.Loc, "constexpr requires fp operands");
+      break;
+    default: llvm_unreachable("Unknown binary operator!");
+    }
+    unsigned Flags = 0;
+    if (NUW)   Flags |= OverflowingBinaryOperator::NoUnsignedWrap;
+    if (NSW)   Flags |= OverflowingBinaryOperator::NoSignedWrap;
+    if (Exact) Flags |= PossiblyExactOperator::IsExact;
+    Constant *C = ConstantExpr::get(Opc, Val0, Val1, Flags);
+    ID.ConstantVal = C;
+    ID.Kind = ValID::t_Constant;
+    return false;
+  }
+
+  // Logical Operations
+  case lltok::kw_and:
+  case lltok::kw_or:
+  case lltok::kw_xor: {
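+    // e.g. (illustrative): and (i64 ptrtoint (i32* @g to i64), i64 -8)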
+    unsigned Opc = Lex.getUIntVal();
+    Constant *Val0, *Val1;
+    Lex.Lex();
+    if (ParseToken(lltok::lparen, "expected '(' in logical constantexpr") ||
+        ParseGlobalTypeAndValue(Val0) ||
+        ParseToken(lltok::comma, "expected comma in logical constantexpr") ||
+        ParseGlobalTypeAndValue(Val1) ||
+        ParseToken(lltok::rparen, "expected ')' in logical constantexpr"))
+      return true;
+    if (Val0->getType() != Val1->getType())
+      return Error(ID.Loc, "operands of constexpr must have same type");
+    if (!Val0->getType()->isIntOrIntVectorTy())
+      return Error(ID.Loc,
+                   "constexpr requires integer or integer vector operands");
+    ID.ConstantVal = ConstantExpr::get(Opc, Val0, Val1);
+    ID.Kind = ValID::t_Constant;
+    return false;
+  }
+
+  case lltok::kw_getelementptr:
+  case lltok::kw_shufflevector:
+  case lltok::kw_insertelement:
+  case lltok::kw_extractelement:
+  case lltok::kw_select: {
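+    // e.g. (illustrative):
+    //   getelementptr inbounds ([4 x i32], [4 x i32]* @arr, i64 0, i64 2)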
+    unsigned Opc = Lex.getUIntVal();
+    SmallVector<Constant*, 16> Elts;
+    bool InBounds = false;
+    Type *Ty;
+    Lex.Lex();
+
+    if (Opc == Instruction::GetElementPtr)
+      InBounds = EatIfPresent(lltok::kw_inbounds);
+
+    if (ParseToken(lltok::lparen, "expected '(' in constantexpr"))
+      return true;
+
+    LocTy ExplicitTypeLoc = Lex.getLoc();
+    if (Opc == Instruction::GetElementPtr) {
+      if (ParseType(Ty) ||
+          ParseToken(lltok::comma, "expected comma after getelementptr's type"))
+        return true;
+    }
+
+    Optional<unsigned> InRangeOp;
+    if (ParseGlobalValueVector(
+            Elts, Opc == Instruction::GetElementPtr ? &InRangeOp : nullptr) ||
+        ParseToken(lltok::rparen, "expected ')' in constantexpr"))
+      return true;
+
+    if (Opc == Instruction::GetElementPtr) {
+      if (Elts.size() == 0 ||
+          !Elts[0]->getType()->getScalarType()->isPointerTy())
+        return Error(ID.Loc, "base of getelementptr must be a pointer");
+
+      Type *BaseType = Elts[0]->getType();
+      auto *BasePointerType = cast<PointerType>(BaseType->getScalarType());
+      if (Ty != BasePointerType->getElementType())
+        return Error(
+            ExplicitTypeLoc,
+            "explicit pointee type doesn't match operand's pointee type");
+
+      unsigned GEPWidth =
+          BaseType->isVectorTy() ? BaseType->getVectorNumElements() : 0;
+
+      ArrayRef<Constant *> Indices(Elts.begin() + 1, Elts.end());
+      for (Constant *Val : Indices) {
+        Type *ValTy = Val->getType();
+        if (!ValTy->getScalarType()->isIntegerTy())
+          return Error(ID.Loc, "getelementptr index must be an integer");
+        if (ValTy->isVectorTy()) {
+          unsigned ValNumEl = ValTy->getVectorNumElements();
+          if (GEPWidth && (ValNumEl != GEPWidth))
+            return Error(
+                ID.Loc,
+                "getelementptr vector index has a wrong number of elements");
+          // GEPWidth may have been unknown because the base is a scalar,
+          // but it is known now.
+          GEPWidth = ValNumEl;
+        }
+      }
+
+      SmallPtrSet<Type*, 4> Visited;
+      if (!Indices.empty() && !Ty->isSized(&Visited))
+        return Error(ID.Loc, "base element of getelementptr must be sized");
+
+      if (!GetElementPtrInst::getIndexedType(Ty, Indices))
+        return Error(ID.Loc, "invalid getelementptr indices");
+
+      if (InRangeOp) {
+        if (*InRangeOp == 0)
+          return Error(ID.Loc,
+                       "inrange keyword may not appear on pointer operand");
+        --*InRangeOp;
+      }
+
+      ID.ConstantVal = ConstantExpr::getGetElementPtr(Ty, Elts[0], Indices,
+                                                      InBounds, InRangeOp);
+    } else if (Opc == Instruction::Select) {
+      if (Elts.size() != 3)
+        return Error(ID.Loc, "expected three operands to select");
+      if (const char *Reason = SelectInst::areInvalidOperands(Elts[0], Elts[1],
+                                                              Elts[2]))
+        return Error(ID.Loc, Reason);
+      ID.ConstantVal = ConstantExpr::getSelect(Elts[0], Elts[1], Elts[2]);
+    } else if (Opc == Instruction::ShuffleVector) {
+      if (Elts.size() != 3)
+        return Error(ID.Loc, "expected three operands to shufflevector");
+      if (!ShuffleVectorInst::isValidOperands(Elts[0], Elts[1], Elts[2]))
+        return Error(ID.Loc, "invalid operands to shufflevector");
+      ID.ConstantVal =
+                 ConstantExpr::getShuffleVector(Elts[0], Elts[1],Elts[2]);
+    } else if (Opc == Instruction::ExtractElement) {
+      if (Elts.size() != 2)
+        return Error(ID.Loc, "expected two operands to extractelement");
+      if (!ExtractElementInst::isValidOperands(Elts[0], Elts[1]))
+        return Error(ID.Loc, "invalid extractelement operands");
+      ID.ConstantVal = ConstantExpr::getExtractElement(Elts[0], Elts[1]);
+    } else {
+      assert(Opc == Instruction::InsertElement && "Unknown opcode");
+      if (Elts.size() != 3)
+      return Error(ID.Loc, "expected three operands to insertelement");
+      if (!InsertElementInst::isValidOperands(Elts[0], Elts[1], Elts[2]))
+        return Error(ID.Loc, "invalid insertelement operands");
+      ID.ConstantVal =
+                 ConstantExpr::getInsertElement(Elts[0], Elts[1],Elts[2]);
+    }
+
+    ID.Kind = ValID::t_Constant;
+    return false;
+  }
+  }
+
+  Lex.Lex();
+  return false;
+}
+
+/// ParseGlobalValue - Parse a global value with the specified type.
+bool LLParser::ParseGlobalValue(Type *Ty, Constant *&C) {
+  C = nullptr;
+  ValID ID;
+  Value *V = nullptr;
+  bool Parsed = ParseValID(ID) ||
+                ConvertValIDToValue(Ty, ID, V, nullptr);
+  if (V && !(C = dyn_cast<Constant>(V)))
+    return Error(ID.Loc, "global values must be constants");
+  return Parsed;
+}
+
+bool LLParser::ParseGlobalTypeAndValue(Constant *&V) {
+  Type *Ty = nullptr;
+  return ParseType(Ty) ||
+         ParseGlobalValue(Ty, V);
+}
+
+bool LLParser::parseOptionalComdat(StringRef GlobalName, Comdat *&C) {
+  C = nullptr;
+
+  LocTy KwLoc = Lex.getLoc();
+  if (!EatIfPresent(lltok::kw_comdat))
+    return false;
+
+  if (EatIfPresent(lltok::lparen)) {
+    if (Lex.getKind() != lltok::ComdatVar)
+      return TokError("expected comdat variable");
+    C = getComdat(Lex.getStrVal(), Lex.getLoc());
+    Lex.Lex();
+    if (ParseToken(lltok::rparen, "expected ')' after comdat var"))
+      return true;
+  } else {
+    if (GlobalName.empty())
+      return TokError("comdat cannot be unnamed");
+    C = getComdat(GlobalName, KwLoc);
+  }
+
+  return false;
+}
+
+/// ParseGlobalValueVector
+///   ::= /*empty*/
+///   ::= [inrange] TypeAndValue (',' [inrange] TypeAndValue)*
+bool LLParser::ParseGlobalValueVector(SmallVectorImpl<Constant *> &Elts,
+                                      Optional<unsigned> *InRangeOp) {
+  // Empty list.
+  if (Lex.getKind() == lltok::rbrace ||
+      Lex.getKind() == lltok::rsquare ||
+      Lex.getKind() == lltok::greater ||
+      Lex.getKind() == lltok::rparen)
+    return false;
+
+  do {
+    if (InRangeOp && !*InRangeOp && EatIfPresent(lltok::kw_inrange))
+      *InRangeOp = Elts.size();
+
+    Constant *C;
+    if (ParseGlobalTypeAndValue(C)) return true;
+    Elts.push_back(C);
+  } while (EatIfPresent(lltok::comma));
+
+  return false;
+}
+
+bool LLParser::ParseMDTuple(MDNode *&MD, bool IsDistinct) {
+  SmallVector<Metadata *, 16> Elts;
+  if (ParseMDNodeVector(Elts))
+    return true;
+
+  MD = (IsDistinct ? MDTuple::getDistinct : MDTuple::get)(Context, Elts);
+  return false;
+}
+
+/// MDNode:
+///  ::= !{ ... }
+///  ::= !7
+///  ::= !DILocation(...)
+bool LLParser::ParseMDNode(MDNode *&N) {
+  if (Lex.getKind() == lltok::MetadataVar)
+    return ParseSpecializedMDNode(N);
+
+  return ParseToken(lltok::exclaim, "expected '!' here") ||
+         ParseMDNodeTail(N);
+}
+
+bool LLParser::ParseMDNodeTail(MDNode *&N) {
+  // !{ ... }
+  if (Lex.getKind() == lltok::lbrace)
+    return ParseMDTuple(N);
+
+  // !42
+  return ParseMDNodeID(N);
+}
+
+namespace {
+
+/// Structure to represent an optional metadata field.
+template <class FieldTy> struct MDFieldImpl {
+  typedef MDFieldImpl ImplTy;
+  FieldTy Val;
+  bool Seen;
+
+  void assign(FieldTy Val) {
+    Seen = true;
+    this->Val = std::move(Val);
+  }
+
+  explicit MDFieldImpl(FieldTy Default)
+      : Val(std::move(Default)), Seen(false) {}
+};
+
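+// The wrappers below give each metadata field kind its default value and,
+// where applicable, the range of values it accepts; ParseMDField is
+// specialized on each of them further down.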
+struct MDUnsignedField : public MDFieldImpl<uint64_t> {
+  uint64_t Max;
+
+  MDUnsignedField(uint64_t Default = 0, uint64_t Max = UINT64_MAX)
+      : ImplTy(Default), Max(Max) {}
+};
+
+struct LineField : public MDUnsignedField {
+  LineField() : MDUnsignedField(0, UINT32_MAX) {}
+};
+
+struct ColumnField : public MDUnsignedField {
+  ColumnField() : MDUnsignedField(0, UINT16_MAX) {}
+};
+
+struct DwarfTagField : public MDUnsignedField {
+  DwarfTagField() : MDUnsignedField(0, dwarf::DW_TAG_hi_user) {}
+  DwarfTagField(dwarf::Tag DefaultTag)
+      : MDUnsignedField(DefaultTag, dwarf::DW_TAG_hi_user) {}
+};
+
+struct DwarfMacinfoTypeField : public MDUnsignedField {
+  DwarfMacinfoTypeField() : MDUnsignedField(0, dwarf::DW_MACINFO_vendor_ext) {}
+  DwarfMacinfoTypeField(dwarf::MacinfoRecordType DefaultType)
+    : MDUnsignedField(DefaultType, dwarf::DW_MACINFO_vendor_ext) {}
+};
+
+struct DwarfAttEncodingField : public MDUnsignedField {
+  DwarfAttEncodingField() : MDUnsignedField(0, dwarf::DW_ATE_hi_user) {}
+};
+
+struct DwarfVirtualityField : public MDUnsignedField {
+  DwarfVirtualityField() : MDUnsignedField(0, dwarf::DW_VIRTUALITY_max) {}
+};
+
+struct DwarfLangField : public MDUnsignedField {
+  DwarfLangField() : MDUnsignedField(0, dwarf::DW_LANG_hi_user) {}
+};
+
+struct DwarfCCField : public MDUnsignedField {
+  DwarfCCField() : MDUnsignedField(0, dwarf::DW_CC_hi_user) {}
+};
+
+struct EmissionKindField : public MDUnsignedField {
+  EmissionKindField() : MDUnsignedField(0, DICompileUnit::LastEmissionKind) {}
+};
+
+struct DIFlagField : public MDFieldImpl<DINode::DIFlags> {
+  DIFlagField() : MDFieldImpl(DINode::FlagZero) {}
+};
+
+struct MDSignedField : public MDFieldImpl<int64_t> {
+  int64_t Min;
+  int64_t Max;
+
+  MDSignedField(int64_t Default = 0)
+      : ImplTy(Default), Min(INT64_MIN), Max(INT64_MAX) {}
+  MDSignedField(int64_t Default, int64_t Min, int64_t Max)
+      : ImplTy(Default), Min(Min), Max(Max) {}
+};
+
+struct MDBoolField : public MDFieldImpl<bool> {
+  MDBoolField(bool Default = false) : ImplTy(Default) {}
+};
+
+struct MDField : public MDFieldImpl<Metadata *> {
+  bool AllowNull;
+
+  MDField(bool AllowNull = true) : ImplTy(nullptr), AllowNull(AllowNull) {}
+};
+
+struct MDConstant : public MDFieldImpl<ConstantAsMetadata *> {
+  MDConstant() : ImplTy(nullptr) {}
+};
+
+struct MDStringField : public MDFieldImpl<MDString *> {
+  bool AllowEmpty;
+  MDStringField(bool AllowEmpty = true)
+      : ImplTy(nullptr), AllowEmpty(AllowEmpty) {}
+};
+
+struct MDFieldList : public MDFieldImpl<SmallVector<Metadata *, 4>> {
+  MDFieldList() : ImplTy(SmallVector<Metadata *, 4>()) {}
+};
+
+struct ChecksumKindField : public MDFieldImpl<DIFile::ChecksumKind> {
+  ChecksumKindField() : ImplTy(DIFile::CSK_None) {}
+  ChecksumKindField(DIFile::ChecksumKind CSKind) : ImplTy(CSKind) {}
+};
+
+} // end anonymous namespace
+
+namespace llvm {
+
+template <>
+bool LLParser::ParseMDField(LocTy Loc, StringRef Name,
+                            MDUnsignedField &Result) {
+  if (Lex.getKind() != lltok::APSInt || Lex.getAPSIntVal().isSigned())
+    return TokError("expected unsigned integer");
+
+  auto &U = Lex.getAPSIntVal();
+  if (U.ugt(Result.Max))
+    return TokError("value for '" + Name + "' too large, limit is " +
+                    Twine(Result.Max));
+  Result.assign(U.getZExtValue());
+  assert(Result.Val <= Result.Max && "Expected value in range");
+  Lex.Lex();
+  return false;
+}
+
+template <>
+bool LLParser::ParseMDField(LocTy Loc, StringRef Name, LineField &Result) {
+  return ParseMDField(Loc, Name, static_cast<MDUnsignedField &>(Result));
+}
+template <>
+bool LLParser::ParseMDField(LocTy Loc, StringRef Name, ColumnField &Result) {
+  return ParseMDField(Loc, Name, static_cast<MDUnsignedField &>(Result));
+}
+
+template <>
+bool LLParser::ParseMDField(LocTy Loc, StringRef Name, DwarfTagField &Result) {
+  if (Lex.getKind() == lltok::APSInt)
+    return ParseMDField(Loc, Name, static_cast<MDUnsignedField &>(Result));
+
+  if (Lex.getKind() != lltok::DwarfTag)
+    return TokError("expected DWARF tag");
+
+  unsigned Tag = dwarf::getTag(Lex.getStrVal());
+  if (Tag == dwarf::DW_TAG_invalid)
+    return TokError("invalid DWARF tag" + Twine(" '") + Lex.getStrVal() + "'");
+  assert(Tag <= Result.Max && "Expected valid DWARF tag");
+
+  Result.assign(Tag);
+  Lex.Lex();
+  return false;
+}
+
+template <>
+bool LLParser::ParseMDField(LocTy Loc, StringRef Name,
+                            DwarfMacinfoTypeField &Result) {
+  if (Lex.getKind() == lltok::APSInt)
+    return ParseMDField(Loc, Name, static_cast<MDUnsignedField &>(Result));
+
+  if (Lex.getKind() != lltok::DwarfMacinfo)
+    return TokError("expected DWARF macinfo type");
+
+  unsigned Macinfo = dwarf::getMacinfo(Lex.getStrVal());
+  if (Macinfo == dwarf::DW_MACINFO_invalid)
+    return TokError(
+        "invalid DWARF macinfo type" + Twine(" '") + Lex.getStrVal() + "'");
+  assert(Macinfo <= Result.Max && "Expected valid DWARF macinfo type");
+
+  Result.assign(Macinfo);
+  Lex.Lex();
+  return false;
+}
+
+template <>
+bool LLParser::ParseMDField(LocTy Loc, StringRef Name,
+                            DwarfVirtualityField &Result) {
+  if (Lex.getKind() == lltok::APSInt)
+    return ParseMDField(Loc, Name, static_cast<MDUnsignedField &>(Result));
+
+  if (Lex.getKind() != lltok::DwarfVirtuality)
+    return TokError("expected DWARF virtuality code");
+
+  unsigned Virtuality = dwarf::getVirtuality(Lex.getStrVal());
+  if (Virtuality == dwarf::DW_VIRTUALITY_invalid)
+    return TokError("invalid DWARF virtuality code" + Twine(" '") +
+                    Lex.getStrVal() + "'");
+  assert(Virtuality <= Result.Max && "Expected valid DWARF virtuality code");
+  Result.assign(Virtuality);
+  Lex.Lex();
+  return false;
+}
+
+template <>
+bool LLParser::ParseMDField(LocTy Loc, StringRef Name, DwarfLangField &Result) {
+  if (Lex.getKind() == lltok::APSInt)
+    return ParseMDField(Loc, Name, static_cast<MDUnsignedField &>(Result));
+
+  if (Lex.getKind() != lltok::DwarfLang)
+    return TokError("expected DWARF language");
+
+  unsigned Lang = dwarf::getLanguage(Lex.getStrVal());
+  if (!Lang)
+    return TokError("invalid DWARF language" + Twine(" '") + Lex.getStrVal() +
+                    "'");
+  assert(Lang <= Result.Max && "Expected valid DWARF language");
+  Result.assign(Lang);
+  Lex.Lex();
+  return false;
+}
+
+template <>
+bool LLParser::ParseMDField(LocTy Loc, StringRef Name, DwarfCCField &Result) {
+  if (Lex.getKind() == lltok::APSInt)
+    return ParseMDField(Loc, Name, static_cast<MDUnsignedField &>(Result));
+
+  if (Lex.getKind() != lltok::DwarfCC)
+    return TokError("expected DWARF calling convention");
+
+  unsigned CC = dwarf::getCallingConvention(Lex.getStrVal());
+  if (!CC)
+    return TokError("invalid DWARF calling convention" + Twine(" '") + Lex.getStrVal() +
+                    "'");
+  assert(CC <= Result.Max && "Expected valid DWARF calling convention");
+  Result.assign(CC);
+  Lex.Lex();
+  return false;
+}
+
+template <>
+bool LLParser::ParseMDField(LocTy Loc, StringRef Name, EmissionKindField &Result) {
+  if (Lex.getKind() == lltok::APSInt)
+    return ParseMDField(Loc, Name, static_cast<MDUnsignedField &>(Result));
+
+  if (Lex.getKind() != lltok::EmissionKind)
+    return TokError("expected emission kind");
+
+  auto Kind = DICompileUnit::getEmissionKind(Lex.getStrVal());
+  if (!Kind)
+    return TokError("invalid emission kind" + Twine(" '") + Lex.getStrVal() +
+                    "'");
+  assert(*Kind <= Result.Max && "Expected valid emission kind");
+  Result.assign(*Kind);
+  Lex.Lex();
+  return false;
+}
+
+template <>
+bool LLParser::ParseMDField(LocTy Loc, StringRef Name,
+                            DwarfAttEncodingField &Result) {
+  if (Lex.getKind() == lltok::APSInt)
+    return ParseMDField(Loc, Name, static_cast<MDUnsignedField &>(Result));
+
+  if (Lex.getKind() != lltok::DwarfAttEncoding)
+    return TokError("expected DWARF type attribute encoding");
+
+  unsigned Encoding = dwarf::getAttributeEncoding(Lex.getStrVal());
+  if (!Encoding)
+    return TokError("invalid DWARF type attribute encoding" + Twine(" '") +
+                    Lex.getStrVal() + "'");
+  assert(Encoding <= Result.Max && "Expected valid DWARF attribute encoding");
+  Result.assign(Encoding);
+  Lex.Lex();
+  return false;
+}
+
+/// DIFlagField
+///  ::= uint32
+///  ::= DIFlagVector
+///  ::= DIFlagVector '|' DIFlagFwdDecl '|' uint32 '|' DIFlagPublic
+template <>
+bool LLParser::ParseMDField(LocTy Loc, StringRef Name, DIFlagField &Result) {
+
+  // Parser for a single flag.
+  auto parseFlag = [&](DINode::DIFlags &Val) {
+    if (Lex.getKind() == lltok::APSInt && !Lex.getAPSIntVal().isSigned()) {
+      uint32_t TempVal = static_cast<uint32_t>(Val);
+      bool Res = ParseUInt32(TempVal);
+      Val = static_cast<DINode::DIFlags>(TempVal);
+      return Res;
+    }
+
+    if (Lex.getKind() != lltok::DIFlag)
+      return TokError("expected debug info flag");
+
+    Val = DINode::getFlag(Lex.getStrVal());
+    if (!Val)
+      return TokError(Twine("invalid debug info flag flag '") +
+                      Lex.getStrVal() + "'");
+    Lex.Lex();
+    return false;
+  };
+
+  // Parse the flags and combine them together.
+  DINode::DIFlags Combined = DINode::FlagZero;
+  do {
+    DINode::DIFlags Val;
+    if (parseFlag(Val))
+      return true;
+    Combined |= Val;
+  } while (EatIfPresent(lltok::bar));
+
+  Result.assign(Combined);
+  return false;
+}
+
+template <>
+bool LLParser::ParseMDField(LocTy Loc, StringRef Name,
+                            MDSignedField &Result) {
+  if (Lex.getKind() != lltok::APSInt)
+    return TokError("expected signed integer");
+
+  auto &S = Lex.getAPSIntVal();
+  if (S < Result.Min)
+    return TokError("value for '" + Name + "' too small, limit is " +
+                    Twine(Result.Min));
+  if (S > Result.Max)
+    return TokError("value for '" + Name + "' too large, limit is " +
+                    Twine(Result.Max));
+  Result.assign(S.getExtValue());
+  assert(Result.Val >= Result.Min && "Expected value in range");
+  assert(Result.Val <= Result.Max && "Expected value in range");
+  Lex.Lex();
+  return false;
+}
+
+template <>
+bool LLParser::ParseMDField(LocTy Loc, StringRef Name, MDBoolField &Result) {
+  switch (Lex.getKind()) {
+  default:
+    return TokError("expected 'true' or 'false'");
+  case lltok::kw_true:
+    Result.assign(true);
+    break;
+  case lltok::kw_false:
+    Result.assign(false);
+    break;
+  }
+  Lex.Lex();
+  return false;
+}
+
+template <>
+bool LLParser::ParseMDField(LocTy Loc, StringRef Name, MDField &Result) {
+  if (Lex.getKind() == lltok::kw_null) {
+    if (!Result.AllowNull)
+      return TokError("'" + Name + "' cannot be null");
+    Lex.Lex();
+    Result.assign(nullptr);
+    return false;
+  }
+
+  Metadata *MD;
+  if (ParseMetadata(MD, nullptr))
+    return true;
+
+  Result.assign(MD);
+  return false;
+}
+
+template <>
+bool LLParser::ParseMDField(LocTy Loc, StringRef Name, MDStringField &Result) {
+  LocTy ValueLoc = Lex.getLoc();
+  std::string S;
+  if (ParseStringConstant(S))
+    return true;
+
+  if (!Result.AllowEmpty && S.empty())
+    return Error(ValueLoc, "'" + Name + "' cannot be empty");
+
+  Result.assign(S.empty() ? nullptr : MDString::get(Context, S));
+  return false;
+}
+
+template <>
+bool LLParser::ParseMDField(LocTy Loc, StringRef Name, MDFieldList &Result) {
+  SmallVector<Metadata *, 4> MDs;
+  if (ParseMDNodeVector(MDs))
+    return true;
+
+  Result.assign(std::move(MDs));
+  return false;
+}
+
+template <>
+bool LLParser::ParseMDField(LocTy Loc, StringRef Name,
+                            ChecksumKindField &Result) {
+  if (Lex.getKind() != lltok::ChecksumKind)
+    return TokError(
+        "invalid checksum kind" + Twine(" '") + Lex.getStrVal() + "'");
+
+  DIFile::ChecksumKind CSKind = DIFile::getChecksumKind(Lex.getStrVal());
+
+  Result.assign(CSKind);
+  Lex.Lex();
+  return false;
+}
+
+} // end namespace llvm
+
+template <class ParserTy>
+bool LLParser::ParseMDFieldsImplBody(ParserTy parseField) {
+  do {
+    if (Lex.getKind() != lltok::LabelStr)
+      return TokError("expected field label here");
+
+    if (parseField())
+      return true;
+  } while (EatIfPresent(lltok::comma));
+
+  return false;
+}
+
+template <class ParserTy>
+bool LLParser::ParseMDFieldsImpl(ParserTy parseField, LocTy &ClosingLoc) {
+  assert(Lex.getKind() == lltok::MetadataVar && "Expected metadata type name");
+  Lex.Lex();
+
+  if (ParseToken(lltok::lparen, "expected '(' here"))
+    return true;
+  if (Lex.getKind() != lltok::rparen)
+    if (ParseMDFieldsImplBody(parseField))
+      return true;
+
+  ClosingLoc = Lex.getLoc();
+  return ParseToken(lltok::rparen, "expected ')' here");
+}
+
+template <class FieldTy>
+bool LLParser::ParseMDField(StringRef Name, FieldTy &Result) {
+  if (Result.Seen)
+    return TokError("field '" + Name + "' cannot be specified more than once");
+
+  LocTy Loc = Lex.getLoc();
+  Lex.Lex();
+  return ParseMDField(Loc, Name, Result);
+}
+
+bool LLParser::ParseSpecializedMDNode(MDNode *&N, bool IsDistinct) {
+  assert(Lex.getKind() == lltok::MetadataVar && "Expected metadata type name");
+
+#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS)                                  \
+  if (Lex.getStrVal() == #CLASS)                                               \
+    return Parse##CLASS(N, IsDistinct);
+#include "llvm/IR/Metadata.def"
+
+  return TokError("expected metadata type");
+}
+
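+// Macro machinery for the ParseDI* routines below: each parser defines
+// VISIT_MD_FIELDS once, and PARSE_MD_FIELDS() expands it into local field
+// declarations, per-field parsing, and checks for required fields.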
+#define DECLARE_FIELD(NAME, TYPE, INIT) TYPE NAME INIT
+#define NOP_FIELD(NAME, TYPE, INIT)
+#define REQUIRE_FIELD(NAME, TYPE, INIT)                                        \
+  if (!NAME.Seen)                                                              \
+    return Error(ClosingLoc, "missing required field '" #NAME "'");
+#define PARSE_MD_FIELD(NAME, TYPE, DEFAULT)                                    \
+  if (Lex.getStrVal() == #NAME)                                                \
+    return ParseMDField(#NAME, NAME);
+#define PARSE_MD_FIELDS()                                                      \
+  VISIT_MD_FIELDS(DECLARE_FIELD, DECLARE_FIELD)                                \
+  do {                                                                         \
+    LocTy ClosingLoc;                                                          \
+    if (ParseMDFieldsImpl([&]() -> bool {                                      \
+      VISIT_MD_FIELDS(PARSE_MD_FIELD, PARSE_MD_FIELD)                          \
+      return TokError(Twine("invalid field '") + Lex.getStrVal() + "'");       \
+    }, ClosingLoc))                                                            \
+      return true;                                                             \
+    VISIT_MD_FIELDS(NOP_FIELD, REQUIRE_FIELD)                                  \
+  } while (false)
+#define GET_OR_DISTINCT(CLASS, ARGS)                                           \
+  (IsDistinct ? CLASS::getDistinct ARGS : CLASS::get ARGS)
+
+/// ParseDILocation:
+///   ::= !DILocation(line: 43, column: 8, scope: !5, inlinedAt: !6)
+bool LLParser::ParseDILocation(MDNode *&Result, bool IsDistinct) {
+#define VISIT_MD_FIELDS(OPTIONAL, REQUIRED)                                    \
+  OPTIONAL(line, LineField, );                                                 \
+  OPTIONAL(column, ColumnField, );                                             \
+  REQUIRED(scope, MDField, (/* AllowNull */ false));                           \
+  OPTIONAL(inlinedAt, MDField, );
+  PARSE_MD_FIELDS();
+#undef VISIT_MD_FIELDS
+
+  Result = GET_OR_DISTINCT(
+      DILocation, (Context, line.Val, column.Val, scope.Val, inlinedAt.Val));
+  return false;
+}
+
+/// ParseGenericDINode:
+///   ::= !GenericDINode(tag: 15, header: "...", operands: {...})
+bool LLParser::ParseGenericDINode(MDNode *&Result, bool IsDistinct) {
+#define VISIT_MD_FIELDS(OPTIONAL, REQUIRED)                                    \
+  REQUIRED(tag, DwarfTagField, );                                              \
+  OPTIONAL(header, MDStringField, );                                           \
+  OPTIONAL(operands, MDFieldList, );
+  PARSE_MD_FIELDS();
+#undef VISIT_MD_FIELDS
+
+  Result = GET_OR_DISTINCT(GenericDINode,
+                           (Context, tag.Val, header.Val, operands.Val));
+  return false;
+}
+
+/// ParseDISubrange:
+///   ::= !DISubrange(count: 30, lowerBound: 2)
+bool LLParser::ParseDISubrange(MDNode *&Result, bool IsDistinct) {
+#define VISIT_MD_FIELDS(OPTIONAL, REQUIRED)                                    \
+  REQUIRED(count, MDSignedField, (-1, -1, INT64_MAX));                         \
+  OPTIONAL(lowerBound, MDSignedField, );
+  PARSE_MD_FIELDS();
+#undef VISIT_MD_FIELDS
+
+  Result = GET_OR_DISTINCT(DISubrange, (Context, count.Val, lowerBound.Val));
+  return false;
+}
+
+/// ParseDIEnumerator:
+///   ::= !DIEnumerator(value: 30, name: "SomeKind")
+bool LLParser::ParseDIEnumerator(MDNode *&Result, bool IsDistinct) {
+#define VISIT_MD_FIELDS(OPTIONAL, REQUIRED)                                    \
+  REQUIRED(name, MDStringField, );                                             \
+  REQUIRED(value, MDSignedField, );
+  PARSE_MD_FIELDS();
+#undef VISIT_MD_FIELDS
+
+  Result = GET_OR_DISTINCT(DIEnumerator, (Context, value.Val, name.Val));
+  return false;
+}
+
+/// ParseDIBasicType:
+///   ::= !DIBasicType(tag: DW_TAG_base_type, name: "int", size: 32, align: 32)
+bool LLParser::ParseDIBasicType(MDNode *&Result, bool IsDistinct) {
+#define VISIT_MD_FIELDS(OPTIONAL, REQUIRED)                                    \
+  OPTIONAL(tag, DwarfTagField, (dwarf::DW_TAG_base_type));                     \
+  OPTIONAL(name, MDStringField, );                                             \
+  OPTIONAL(size, MDUnsignedField, (0, UINT64_MAX));                            \
+  OPTIONAL(align, MDUnsignedField, (0, UINT32_MAX));                           \
+  OPTIONAL(encoding, DwarfAttEncodingField, );
+  PARSE_MD_FIELDS();
+#undef VISIT_MD_FIELDS
+
+  Result = GET_OR_DISTINCT(DIBasicType, (Context, tag.Val, name.Val, size.Val,
+                                         align.Val, encoding.Val));
+  return false;
+}
+
+/// ParseDIDerivedType:
+///   ::= !DIDerivedType(tag: DW_TAG_pointer_type, name: "int", file: !0,
+///                      line: 7, scope: !1, baseType: !2, size: 32,
+///                      align: 32, offset: 0, flags: 0, extraData: !3)
+bool LLParser::ParseDIDerivedType(MDNode *&Result, bool IsDistinct) {
+#define VISIT_MD_FIELDS(OPTIONAL, REQUIRED)                                    \
+  REQUIRED(tag, DwarfTagField, );                                              \
+  OPTIONAL(name, MDStringField, );                                             \
+  OPTIONAL(file, MDField, );                                                   \
+  OPTIONAL(line, LineField, );                                                 \
+  OPTIONAL(scope, MDField, );                                                  \
+  REQUIRED(baseType, MDField, );                                               \
+  OPTIONAL(size, MDUnsignedField, (0, UINT64_MAX));                            \
+  OPTIONAL(align, MDUnsignedField, (0, UINT32_MAX));                           \
+  OPTIONAL(offset, MDUnsignedField, (0, UINT64_MAX));                          \
+  OPTIONAL(flags, DIFlagField, );                                              \
+  OPTIONAL(extraData, MDField, );
+  PARSE_MD_FIELDS();
+#undef VISIT_MD_FIELDS
+
+  Result = GET_OR_DISTINCT(DIDerivedType,
+                           (Context, tag.Val, name.Val, file.Val, line.Val,
+                            scope.Val, baseType.Val, size.Val, align.Val,
+                            offset.Val, flags.Val, extraData.Val));
+  return false;
+}
+
+bool LLParser::ParseDICompositeType(MDNode *&Result, bool IsDistinct) {
+#define VISIT_MD_FIELDS(OPTIONAL, REQUIRED)                                    \
+  REQUIRED(tag, DwarfTagField, );                                              \
+  OPTIONAL(name, MDStringField, );                                             \
+  OPTIONAL(file, MDField, );                                                   \
+  OPTIONAL(line, LineField, );                                                 \
+  OPTIONAL(scope, MDField, );                                                  \
+  OPTIONAL(baseType, MDField, );                                               \
+  OPTIONAL(size, MDUnsignedField, (0, UINT64_MAX));                            \
+  OPTIONAL(align, MDUnsignedField, (0, UINT32_MAX));                           \
+  OPTIONAL(offset, MDUnsignedField, (0, UINT64_MAX));                          \
+  OPTIONAL(flags, DIFlagField, );                                              \
+  OPTIONAL(elements, MDField, );                                               \
+  OPTIONAL(runtimeLang, DwarfLangField, );                                     \
+  OPTIONAL(vtableHolder, MDField, );                                           \
+  OPTIONAL(templateParams, MDField, );                                         \
+  OPTIONAL(identifier, MDStringField, );
+  PARSE_MD_FIELDS();
+#undef VISIT_MD_FIELDS
+
+  // If this has an identifier try to build an ODR type.
+  if (identifier.Val)
+    if (auto *CT = DICompositeType::buildODRType(
+            Context, *identifier.Val, tag.Val, name.Val, file.Val, line.Val,
+            scope.Val, baseType.Val, size.Val, align.Val, offset.Val, flags.Val,
+            elements.Val, runtimeLang.Val, vtableHolder.Val,
+            templateParams.Val)) {
+      Result = CT;
+      return false;
+    }
+
+  // Create a new node, and save it in the context if it belongs in the type
+  // map.
+  Result = GET_OR_DISTINCT(
+      DICompositeType,
+      (Context, tag.Val, name.Val, file.Val, line.Val, scope.Val, baseType.Val,
+       size.Val, align.Val, offset.Val, flags.Val, elements.Val,
+       runtimeLang.Val, vtableHolder.Val, templateParams.Val, identifier.Val));
+  return false;
+}
+
+bool LLParser::ParseDISubroutineType(MDNode *&Result, bool IsDistinct) {
+#define VISIT_MD_FIELDS(OPTIONAL, REQUIRED)                                    \
+  OPTIONAL(flags, DIFlagField, );                                              \
+  OPTIONAL(cc, DwarfCCField, );                                                \
+  REQUIRED(types, MDField, );
+  PARSE_MD_FIELDS();
+#undef VISIT_MD_FIELDS
+
+  Result = GET_OR_DISTINCT(DISubroutineType,
+                           (Context, flags.Val, cc.Val, types.Val));
+  return false;
+}
+
+/// ParseDIFile:
+///   ::= !DIFile(filename: "path/to/file", directory: "/path/to/dir",
+///               checksumkind: CSK_MD5,
+///               checksum: "000102030405060708090a0b0c0d0e0f")
+bool LLParser::ParseDIFile(MDNode *&Result, bool IsDistinct) {
+#define VISIT_MD_FIELDS(OPTIONAL, REQUIRED)                                    \
+  REQUIRED(filename, MDStringField, );                                         \
+  REQUIRED(directory, MDStringField, );                                        \
+  OPTIONAL(checksumkind, ChecksumKindField, );                                 \
+  OPTIONAL(checksum, MDStringField, );
+  PARSE_MD_FIELDS();
+#undef VISIT_MD_FIELDS
+
+  Result = GET_OR_DISTINCT(DIFile, (Context, filename.Val, directory.Val,
+                                    checksumkind.Val, checksum.Val));
+  return false;
+}
+
+/// ParseDICompileUnit:
+///   ::= !DICompileUnit(language: DW_LANG_C99, file: !0, producer: "clang",
+///                      isOptimized: true, flags: "-O2", runtimeVersion: 1,
+///                      splitDebugFilename: "abc.debug",
+///                      emissionKind: FullDebug, enums: !1, retainedTypes: !2,
+///                      globals: !4, imports: !5, macros: !6, dwoId: 0x0abcd)
+bool LLParser::ParseDICompileUnit(MDNode *&Result, bool IsDistinct) {
+  if (!IsDistinct)
+    return Lex.Error("missing 'distinct', required for !DICompileUnit");
+
+#define VISIT_MD_FIELDS(OPTIONAL, REQUIRED)                                    \
+  REQUIRED(language, DwarfLangField, );                                        \
+  REQUIRED(file, MDField, (/* AllowNull */ false));                            \
+  OPTIONAL(producer, MDStringField, );                                         \
+  OPTIONAL(isOptimized, MDBoolField, );                                        \
+  OPTIONAL(flags, MDStringField, );                                            \
+  OPTIONAL(runtimeVersion, MDUnsignedField, (0, UINT32_MAX));                  \
+  OPTIONAL(splitDebugFilename, MDStringField, );                               \
+  OPTIONAL(emissionKind, EmissionKindField, );                                 \
+  OPTIONAL(enums, MDField, );                                                  \
+  OPTIONAL(retainedTypes, MDField, );                                          \
+  OPTIONAL(globals, MDField, );                                                \
+  OPTIONAL(imports, MDField, );                                                \
+  OPTIONAL(macros, MDField, );                                                 \
+  OPTIONAL(dwoId, MDUnsignedField, );                                          \
+  OPTIONAL(splitDebugInlining, MDBoolField, = true);
+  PARSE_MD_FIELDS();
+#undef VISIT_MD_FIELDS
+
+  Result = DICompileUnit::getDistinct(
+      Context, language.Val, file.Val, producer.Val, isOptimized.Val, flags.Val,
+      runtimeVersion.Val, splitDebugFilename.Val, emissionKind.Val, enums.Val,
+      retainedTypes.Val, globals.Val, imports.Val, macros.Val, dwoId.Val,
+      splitDebugInlining.Val);
+  return false;
+}
+
+/// ParseDISubprogram:
+///   ::= !DISubprogram(scope: !0, name: "foo", linkageName: "_Zfoo",
+///                     file: !1, line: 7, type: !2, isLocal: false,
+///                     isDefinition: true, scopeLine: 8, containingType: !3,
+///                     virtuality: DW_VIRTUALITY_pure_virtual,
+///                     virtualIndex: 10, thisAdjustment: 4, flags: 11,
+///                     isOptimized: false, templateParams: !4, declaration: !5,
+///                     variables: !6)
+bool LLParser::ParseDISubprogram(MDNode *&Result, bool IsDistinct) {
+  auto Loc = Lex.getLoc();
+#define VISIT_MD_FIELDS(OPTIONAL, REQUIRED)                                    \
+  OPTIONAL(scope, MDField, );                                                  \
+  OPTIONAL(name, MDStringField, );                                             \
+  OPTIONAL(linkageName, MDStringField, );                                      \
+  OPTIONAL(file, MDField, );                                                   \
+  OPTIONAL(line, LineField, );                                                 \
+  OPTIONAL(type, MDField, );                                                   \
+  OPTIONAL(isLocal, MDBoolField, );                                            \
+  OPTIONAL(isDefinition, MDBoolField, (true));                                 \
+  OPTIONAL(scopeLine, LineField, );                                            \
+  OPTIONAL(containingType, MDField, );                                         \
+  OPTIONAL(virtuality, DwarfVirtualityField, );                                \
+  OPTIONAL(virtualIndex, MDUnsignedField, (0, UINT32_MAX));                    \
+  OPTIONAL(thisAdjustment, MDSignedField, (0, INT32_MIN, INT32_MAX));          \
+  OPTIONAL(flags, DIFlagField, );                                              \
+  OPTIONAL(isOptimized, MDBoolField, );                                        \
+  OPTIONAL(unit, MDField, );                                                   \
+  OPTIONAL(templateParams, MDField, );                                         \
+  OPTIONAL(declaration, MDField, );                                            \
+  OPTIONAL(variables, MDField, );
+  PARSE_MD_FIELDS();
+#undef VISIT_MD_FIELDS
+
+  if (isDefinition.Val && !IsDistinct)
+    return Lex.Error(
+        Loc,
+        "missing 'distinct', required for !DISubprogram when 'isDefinition'");
+
+  Result = GET_OR_DISTINCT(
+      DISubprogram, (Context, scope.Val, name.Val, linkageName.Val, file.Val,
+                     line.Val, type.Val, isLocal.Val, isDefinition.Val,
+                     scopeLine.Val, containingType.Val, virtuality.Val,
+                     virtualIndex.Val, thisAdjustment.Val, flags.Val,
+                     isOptimized.Val, unit.Val, templateParams.Val,
+                     declaration.Val, variables.Val));
+  return false;
+}
+
+/// ParseDILexicalBlock:
+///   ::= !DILexicalBlock(scope: !0, file: !2, line: 7, column: 9)
+bool LLParser::ParseDILexicalBlock(MDNode *&Result, bool IsDistinct) {
+#define VISIT_MD_FIELDS(OPTIONAL, REQUIRED)                                    \
+  REQUIRED(scope, MDField, (/* AllowNull */ false));                           \
+  OPTIONAL(file, MDField, );                                                   \
+  OPTIONAL(line, LineField, );                                                 \
+  OPTIONAL(column, ColumnField, );
+  PARSE_MD_FIELDS();
+#undef VISIT_MD_FIELDS
+
+  Result = GET_OR_DISTINCT(
+      DILexicalBlock, (Context, scope.Val, file.Val, line.Val, column.Val));
+  return false;
+}
+
+/// ParseDILexicalBlockFile:
+///   ::= !DILexicalBlockFile(scope: !0, file: !2, discriminator: 9)
+bool LLParser::ParseDILexicalBlockFile(MDNode *&Result, bool IsDistinct) {
+#define VISIT_MD_FIELDS(OPTIONAL, REQUIRED)                                    \
+  REQUIRED(scope, MDField, (/* AllowNull */ false));                           \
+  OPTIONAL(file, MDField, );                                                   \
+  REQUIRED(discriminator, MDUnsignedField, (0, UINT32_MAX));
+  PARSE_MD_FIELDS();
+#undef VISIT_MD_FIELDS
+
+  Result = GET_OR_DISTINCT(DILexicalBlockFile,
+                           (Context, scope.Val, file.Val, discriminator.Val));
+  return false;
+}
+
+/// ParseDINamespace:
+///   ::= !DINamespace(scope: !0, file: !2, name: "SomeNamespace", line: 9)
+bool LLParser::ParseDINamespace(MDNode *&Result, bool IsDistinct) {
+#define VISIT_MD_FIELDS(OPTIONAL, REQUIRED)                                    \
+  REQUIRED(scope, MDField, );                                                  \
+  OPTIONAL(file, MDField, );                                                   \
+  OPTIONAL(name, MDStringField, );                                             \
+  OPTIONAL(line, LineField, );                                                 \
+  OPTIONAL(exportSymbols, MDBoolField, );
+  PARSE_MD_FIELDS();
+#undef VISIT_MD_FIELDS
+
+  Result = GET_OR_DISTINCT(DINamespace,
+                           (Context, scope.Val, file.Val, name.Val, line.Val,
+                            exportSymbols.Val));
+  return false;
+}
+
+/// ParseDIMacro:
+///   ::= !DIMacro(macinfo: type, line: 9, name: "SomeMacro", value: "SomeValue")
+bool LLParser::ParseDIMacro(MDNode *&Result, bool IsDistinct) {
+#define VISIT_MD_FIELDS(OPTIONAL, REQUIRED)                                    \
+  REQUIRED(type, DwarfMacinfoTypeField, );                                     \
+  OPTIONAL(line, LineField, );                                                 \
+  REQUIRED(name, MDStringField, );                                             \
+  OPTIONAL(value, MDStringField, );
+  PARSE_MD_FIELDS();
+#undef VISIT_MD_FIELDS
+
+  Result = GET_OR_DISTINCT(DIMacro,
+                           (Context, type.Val, line.Val, name.Val, value.Val));
+  return false;
+}
+
+/// ParseDIMacroFile:
+///   ::= !DIMacroFile(line: 9, file: !2, nodes: !3)
+bool LLParser::ParseDIMacroFile(MDNode *&Result, bool IsDistinct) {
+#define VISIT_MD_FIELDS(OPTIONAL, REQUIRED)                                    \
+  OPTIONAL(type, DwarfMacinfoTypeField, (dwarf::DW_MACINFO_start_file));       \
+  OPTIONAL(line, LineField, );                                                 \
+  REQUIRED(file, MDField, );                                                   \
+  OPTIONAL(nodes, MDField, );
+  PARSE_MD_FIELDS();
+#undef VISIT_MD_FIELDS
+
+  Result = GET_OR_DISTINCT(DIMacroFile,
+                           (Context, type.Val, line.Val, file.Val, nodes.Val));
+  return false;
+}
+
+/// ParseDIModule:
+///   ::= !DIModule(scope: !0, name: "SomeModule", configMacros: "-DNDEBUG",
+///                 includePath: "/usr/include", isysroot: "/")
+bool LLParser::ParseDIModule(MDNode *&Result, bool IsDistinct) {
+#define VISIT_MD_FIELDS(OPTIONAL, REQUIRED)                                    \
+  REQUIRED(scope, MDField, );                                                  \
+  REQUIRED(name, MDStringField, );                                             \
+  OPTIONAL(configMacros, MDStringField, );                                     \
+  OPTIONAL(includePath, MDStringField, );                                      \
+  OPTIONAL(isysroot, MDStringField, );
+  PARSE_MD_FIELDS();
+#undef VISIT_MD_FIELDS
+
+  Result = GET_OR_DISTINCT(DIModule, (Context, scope.Val, name.Val,
+                           configMacros.Val, includePath.Val, isysroot.Val));
+  return false;
+}
+
+/// ParseDITemplateTypeParameter:
+///   ::= !DITemplateTypeParameter(name: "Ty", type: !1)
+bool LLParser::ParseDITemplateTypeParameter(MDNode *&Result, bool IsDistinct) {
+#define VISIT_MD_FIELDS(OPTIONAL, REQUIRED)                                    \
+  OPTIONAL(name, MDStringField, );                                             \
+  REQUIRED(type, MDField, );
+  PARSE_MD_FIELDS();
+#undef VISIT_MD_FIELDS
+
+  Result =
+      GET_OR_DISTINCT(DITemplateTypeParameter, (Context, name.Val, type.Val));
+  return false;
+}
+
+/// ParseDITemplateValueParameter:
+///   ::= !DITemplateValueParameter(tag: DW_TAG_template_value_parameter,
+///                                 name: "V", type: !1, value: i32 7)
+bool LLParser::ParseDITemplateValueParameter(MDNode *&Result, bool IsDistinct) {
+#define VISIT_MD_FIELDS(OPTIONAL, REQUIRED)                                    \
+  OPTIONAL(tag, DwarfTagField, (dwarf::DW_TAG_template_value_parameter));      \
+  OPTIONAL(name, MDStringField, );                                             \
+  OPTIONAL(type, MDField, );                                                   \
+  REQUIRED(value, MDField, );
+  PARSE_MD_FIELDS();
+#undef VISIT_MD_FIELDS
+
+  Result = GET_OR_DISTINCT(DITemplateValueParameter,
+                           (Context, tag.Val, name.Val, type.Val, value.Val));
+  return false;
+}
+
+/// ParseDIGlobalVariable:
+///   ::= !DIGlobalVariable(scope: !0, name: "foo", linkageName: "foo",
+///                         file: !1, line: 7, type: !2, isLocal: false,
+///                         isDefinition: true, declaration: !3, align: 8)
+bool LLParser::ParseDIGlobalVariable(MDNode *&Result, bool IsDistinct) {
+#define VISIT_MD_FIELDS(OPTIONAL, REQUIRED)                                    \
+  REQUIRED(name, MDStringField, (/* AllowEmpty */ false));                     \
+  OPTIONAL(scope, MDField, );                                                  \
+  OPTIONAL(linkageName, MDStringField, );                                      \
+  OPTIONAL(file, MDField, );                                                   \
+  OPTIONAL(line, LineField, );                                                 \
+  OPTIONAL(type, MDField, );                                                   \
+  OPTIONAL(isLocal, MDBoolField, );                                            \
+  OPTIONAL(isDefinition, MDBoolField, (true));                                 \
+  OPTIONAL(declaration, MDField, );                                            \
+  OPTIONAL(align, MDUnsignedField, (0, UINT32_MAX));
+  PARSE_MD_FIELDS();
+#undef VISIT_MD_FIELDS
+
+  Result = GET_OR_DISTINCT(DIGlobalVariable,
+                           (Context, scope.Val, name.Val, linkageName.Val,
+                            file.Val, line.Val, type.Val, isLocal.Val,
+                            isDefinition.Val, declaration.Val, align.Val));
+  return false;
+}
+
+/// ParseDILocalVariable:
+///   ::= !DILocalVariable(arg: 7, scope: !0, name: "foo",
+///                        file: !1, line: 7, type: !2, arg: 2, flags: 7,
+///                        align: 8)
+///   ::= !DILocalVariable(scope: !0, name: "foo",
+///                        file: !1, line: 7, type: !2, arg: 2, flags: 7,
+///                        align: 8)
+bool LLParser::ParseDILocalVariable(MDNode *&Result, bool IsDistinct) {
+#define VISIT_MD_FIELDS(OPTIONAL, REQUIRED)                                    \
+  REQUIRED(scope, MDField, (/* AllowNull */ false));                           \
+  OPTIONAL(name, MDStringField, );                                             \
+  OPTIONAL(arg, MDUnsignedField, (0, UINT16_MAX));                             \
+  OPTIONAL(file, MDField, );                                                   \
+  OPTIONAL(line, LineField, );                                                 \
+  OPTIONAL(type, MDField, );                                                   \
+  OPTIONAL(flags, DIFlagField, );                                              \
+  OPTIONAL(align, MDUnsignedField, (0, UINT32_MAX));
+  PARSE_MD_FIELDS();
+#undef VISIT_MD_FIELDS
+
+  Result = GET_OR_DISTINCT(DILocalVariable,
+                           (Context, scope.Val, name.Val, file.Val, line.Val,
+                            type.Val, arg.Val, flags.Val, align.Val));
+  return false;
+}
+
+/// ParseDIExpression:
+///   ::= !DIExpression(0, 7, -1)
+bool LLParser::ParseDIExpression(MDNode *&Result, bool IsDistinct) {
+  assert(Lex.getKind() == lltok::MetadataVar && "Expected metadata type name");
+  Lex.Lex();
+
+  if (ParseToken(lltok::lparen, "expected '(' here"))
+    return true;
+
+  SmallVector<uint64_t, 8> Elements;
+  if (Lex.getKind() != lltok::rparen)
+    do {
+      if (Lex.getKind() == lltok::DwarfOp) {
+        if (unsigned Op = dwarf::getOperationEncoding(Lex.getStrVal())) {
+          Lex.Lex();
+          Elements.push_back(Op);
+          continue;
+        }
+        return TokError(Twine("invalid DWARF op '") + Lex.getStrVal() + "'");
+      }
+
+      if (Lex.getKind() != lltok::APSInt || Lex.getAPSIntVal().isSigned())
+        return TokError("expected unsigned integer");
+
+      auto &U = Lex.getAPSIntVal();
+      if (U.ugt(UINT64_MAX))
+        return TokError("element too large, limit is " + Twine(UINT64_MAX));
+      Elements.push_back(U.getZExtValue());
+      Lex.Lex();
+    } while (EatIfPresent(lltok::comma));
+
+  if (ParseToken(lltok::rparen, "expected ')' here"))
+    return true;
+
+  Result = GET_OR_DISTINCT(DIExpression, (Context, Elements));
+  return false;
+}
+
+/// ParseDIGlobalVariableExpression:
+///   ::= !DIGlobalVariableExpression(var: !0, expr: !1)
+bool LLParser::ParseDIGlobalVariableExpression(MDNode *&Result,
+                                               bool IsDistinct) {
+#define VISIT_MD_FIELDS(OPTIONAL, REQUIRED)                                    \
+  REQUIRED(var, MDField, );                                                    \
+  OPTIONAL(expr, MDField, );
+  PARSE_MD_FIELDS();
+#undef VISIT_MD_FIELDS
+
+  Result =
+      GET_OR_DISTINCT(DIGlobalVariableExpression, (Context, var.Val, expr.Val));
+  return false;
+}
+
+/// ParseDIObjCProperty:
+///   ::= !DIObjCProperty(name: "foo", file: !1, line: 7, setter: "setFoo",
+///                       getter: "getFoo", attributes: 7, type: !2)
+bool LLParser::ParseDIObjCProperty(MDNode *&Result, bool IsDistinct) {
+#define VISIT_MD_FIELDS(OPTIONAL, REQUIRED)                                    \
+  OPTIONAL(name, MDStringField, );                                             \
+  OPTIONAL(file, MDField, );                                                   \
+  OPTIONAL(line, LineField, );                                                 \
+  OPTIONAL(setter, MDStringField, );                                           \
+  OPTIONAL(getter, MDStringField, );                                           \
+  OPTIONAL(attributes, MDUnsignedField, (0, UINT32_MAX));                      \
+  OPTIONAL(type, MDField, );
+  PARSE_MD_FIELDS();
+#undef VISIT_MD_FIELDS
+
+  Result = GET_OR_DISTINCT(DIObjCProperty,
+                           (Context, name.Val, file.Val, line.Val, setter.Val,
+                            getter.Val, attributes.Val, type.Val));
+  return false;
+}
+
+/// ParseDIImportedEntity:
+///   ::= !DIImportedEntity(tag: DW_TAG_imported_module, scope: !0, entity: !1,
+///                         line: 7, name: "foo")
+bool LLParser::ParseDIImportedEntity(MDNode *&Result, bool IsDistinct) {
+#define VISIT_MD_FIELDS(OPTIONAL, REQUIRED)                                    \
+  REQUIRED(tag, DwarfTagField, );                                              \
+  REQUIRED(scope, MDField, );                                                  \
+  OPTIONAL(entity, MDField, );                                                 \
+  OPTIONAL(line, LineField, );                                                 \
+  OPTIONAL(name, MDStringField, );
+  PARSE_MD_FIELDS();
+#undef VISIT_MD_FIELDS
+
+  Result = GET_OR_DISTINCT(DIImportedEntity, (Context, tag.Val, scope.Val,
+                                              entity.Val, line.Val, name.Val));
+  return false;
+}
+
+#undef PARSE_MD_FIELD
+#undef NOP_FIELD
+#undef REQUIRE_FIELD
+#undef DECLARE_FIELD
+
+/// ParseMetadataAsValue
+///  ::= metadata i32 %local
+///  ::= metadata i32 @global
+///  ::= metadata i32 7
+///  ::= metadata !0
+///  ::= metadata !{...}
+///  ::= metadata !"string"
+bool LLParser::ParseMetadataAsValue(Value *&V, PerFunctionState &PFS) {
+  // Note: the type 'metadata' has already been parsed.
+  Metadata *MD;
+  if (ParseMetadata(MD, &PFS))
+    return true;
+
+  V = MetadataAsValue::get(Context, MD);
+  return false;
+}
+
+/// ParseValueAsMetadata
+///  ::= i32 %local
+///  ::= i32 @global
+///  ::= i32 7
+bool LLParser::ParseValueAsMetadata(Metadata *&MD, const Twine &TypeMsg,
+                                    PerFunctionState *PFS) {
+  Type *Ty;
+  LocTy Loc;
+  if (ParseType(Ty, TypeMsg, Loc))
+    return true;
+  if (Ty->isMetadataTy())
+    return Error(Loc, "invalid metadata-value-metadata roundtrip");
+
+  Value *V;
+  if (ParseValue(Ty, V, PFS))
+    return true;
+
+  MD = ValueAsMetadata::get(V);
+  return false;
+}
+
+/// ParseMetadata
+///  ::= i32 %local
+///  ::= i32 @global
+///  ::= i32 7
+///  ::= !42
+///  ::= !{...}
+///  ::= !"string"
+///  ::= !DILocation(...)
+bool LLParser::ParseMetadata(Metadata *&MD, PerFunctionState *PFS) {
+  if (Lex.getKind() == lltok::MetadataVar) {
+    MDNode *N;
+    if (ParseSpecializedMDNode(N))
+      return true;
+    MD = N;
+    return false;
+  }
+
+  // ValueAsMetadata:
+  // <type> <value>
+  if (Lex.getKind() != lltok::exclaim)
+    return ParseValueAsMetadata(MD, "expected metadata operand", PFS);
+
+  // '!'.
+  assert(Lex.getKind() == lltok::exclaim && "Expected '!' here");
+  Lex.Lex();
+
+  // MDString:
+  //   ::= '!' STRINGCONSTANT
+  if (Lex.getKind() == lltok::StringConstant) {
+    MDString *S;
+    if (ParseMDString(S))
+      return true;
+    MD = S;
+    return false;
+  }
+
+  // MDNode:
+  // !{ ... }
+  // !7
+  MDNode *N;
+  if (ParseMDNodeTail(N))
+    return true;
+  MD = N;
+  return false;
+}
+
+//===----------------------------------------------------------------------===//
+// Function Parsing.
+//===----------------------------------------------------------------------===//
+
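+/// Convert a parsed ValID (a syntactic value reference such as a local or
+/// global name or ID, a literal constant, or inline asm) into a Value of the
+/// requested type Ty, emitting an error on any kind or type mismatch.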
+bool LLParser::ConvertValIDToValue(Type *Ty, ValID &ID, Value *&V,
+                                   PerFunctionState *PFS) {
+  if (Ty->isFunctionTy())
+    return Error(ID.Loc, "functions are not values, refer to them as pointers");
+
+  switch (ID.Kind) {
+  case ValID::t_LocalID:
+    if (!PFS) return Error(ID.Loc, "invalid use of function-local name");
+    V = PFS->GetVal(ID.UIntVal, Ty, ID.Loc);
+    return V == nullptr;
+  case ValID::t_LocalName:
+    if (!PFS) return Error(ID.Loc, "invalid use of function-local name");
+    V = PFS->GetVal(ID.StrVal, Ty, ID.Loc);
+    return V == nullptr;
+  case ValID::t_InlineAsm: {
+    if (!ID.FTy || !InlineAsm::Verify(ID.FTy, ID.StrVal2))
+      return Error(ID.Loc, "invalid type for inline asm constraint string");
+    V = InlineAsm::get(ID.FTy, ID.StrVal, ID.StrVal2, ID.UIntVal & 1,
+                       (ID.UIntVal >> 1) & 1,
+                       (InlineAsm::AsmDialect(ID.UIntVal >> 2)));
+    return false;
+  }
+  case ValID::t_GlobalName:
+    V = GetGlobalVal(ID.StrVal, Ty, ID.Loc);
+    return V == nullptr;
+  case ValID::t_GlobalID:
+    V = GetGlobalVal(ID.UIntVal, Ty, ID.Loc);
+    return V == nullptr;
+  case ValID::t_APSInt:
+    if (!Ty->isIntegerTy())
+      return Error(ID.Loc, "integer constant must have integer type");
+    ID.APSIntVal = ID.APSIntVal.extOrTrunc(Ty->getPrimitiveSizeInBits());
+    V = ConstantInt::get(Context, ID.APSIntVal);
+    return false;
+  case ValID::t_APFloat:
+    if (!Ty->isFloatingPointTy() ||
+        !ConstantFP::isValueValidForType(Ty, ID.APFloatVal))
+      return Error(ID.Loc, "floating point constant invalid for type");
+
+    // The lexer has no type info, so builds all half, float, and double FP
+    // constants as double.  Fix this here.  Long double does not need this.
+    if (&ID.APFloatVal.getSemantics() == &APFloat::IEEEdouble()) {
+      bool Ignored;
+      if (Ty->isHalfTy())
+        ID.APFloatVal.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven,
+                              &Ignored);
+      else if (Ty->isFloatTy())
+        ID.APFloatVal.convert(APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven,
+                              &Ignored);
+    }
+    V = ConstantFP::get(Context, ID.APFloatVal);
+
+    if (V->getType() != Ty)
+      return Error(ID.Loc, "floating point constant does not have type '" +
+                   getTypeString(Ty) + "'");
+
+    return false;
+  case ValID::t_Null:
+    if (!Ty->isPointerTy())
+      return Error(ID.Loc, "null must be a pointer type");
+    V = ConstantPointerNull::get(cast<PointerType>(Ty));
+    return false;
+  case ValID::t_Undef:
+    // FIXME: LabelTy should not be a first-class type.
+    if (!Ty->isFirstClassType() || Ty->isLabelTy())
+      return Error(ID.Loc, "invalid type for undef constant");
+    V = UndefValue::get(Ty);
+    return false;
+  case ValID::t_EmptyArray:
+    if (!Ty->isArrayTy() || cast<ArrayType>(Ty)->getNumElements() != 0)
+      return Error(ID.Loc, "invalid empty array initializer");
+    V = UndefValue::get(Ty);
+    return false;
+  case ValID::t_Zero:
+    // FIXME: LabelTy should not be a first-class type.
+    if (!Ty->isFirstClassType() || Ty->isLabelTy())
+      return Error(ID.Loc, "invalid type for null constant");
+    V = Constant::getNullValue(Ty);
+    return false;
+  case ValID::t_None:
+    if (!Ty->isTokenTy())
+      return Error(ID.Loc, "invalid type for none constant");
+    V = Constant::getNullValue(Ty);
+    return false;
+  case ValID::t_Constant:
+    if (ID.ConstantVal->getType() != Ty)
+      return Error(ID.Loc, "constant expression type mismatch");
+
+    V = ID.ConstantVal;
+    return false;
+  case ValID::t_ConstantStruct:
+  case ValID::t_PackedConstantStruct:
+    if (StructType *ST = dyn_cast<StructType>(Ty)) {
+      if (ST->getNumElements() != ID.UIntVal)
+        return Error(ID.Loc,
+                     "initializer with struct type has wrong # elements");
+      if (ST->isPacked() != (ID.Kind == ValID::t_PackedConstantStruct))
+        return Error(ID.Loc, "packed'ness of initializer and type don't match");
+
+      // Verify that the elements are compatible with the structtype.
+      for (unsigned i = 0, e = ID.UIntVal; i != e; ++i)
+        if (ID.ConstantStructElts[i]->getType() != ST->getElementType(i))
+          return Error(ID.Loc, "element " + Twine(i) +
+                    " of struct initializer doesn't match struct element type");
+
+      V = ConstantStruct::get(
+          ST, makeArrayRef(ID.ConstantStructElts.get(), ID.UIntVal));
+    } else
+      return Error(ID.Loc, "constant expression type mismatch");
+    return false;
+  }
+  llvm_unreachable("Invalid ValID");
+}
+
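+/// Parse a standalone constant of type Ty.  Only ValID kinds that can be
+/// represented as a Constant (integer and FP literals, undef, constant
+/// expressions, and constant structs) are accepted here.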
+bool LLParser::parseConstantValue(Type *Ty, Constant *&C) {
+  C = nullptr;
+  ValID ID;
+  auto Loc = Lex.getLoc();
+  if (ParseValID(ID, /*PFS=*/nullptr))
+    return true;
+  switch (ID.Kind) {
+  case ValID::t_APSInt:
+  case ValID::t_APFloat:
+  case ValID::t_Undef:
+  case ValID::t_Constant:
+  case ValID::t_ConstantStruct:
+  case ValID::t_PackedConstantStruct: {
+    Value *V;
+    if (ConvertValIDToValue(Ty, ID, V, /*PFS=*/nullptr))
+      return true;
+    assert(isa<Constant>(V) && "Expected a constant value");
+    C = cast<Constant>(V);
+    return false;
+  }
+  default:
+    return Error(Loc, "expected a constant value");
+  }
+}
+
+bool LLParser::ParseValue(Type *Ty, Value *&V, PerFunctionState *PFS) {
+  V = nullptr;
+  ValID ID;
+  return ParseValID(ID, PFS) || ConvertValIDToValue(Ty, ID, V, PFS);
+}
+
+bool LLParser::ParseTypeAndValue(Value *&V, PerFunctionState *PFS) {
+  Type *Ty = nullptr;
+  return ParseType(Ty) ||
+         ParseValue(Ty, V, PFS);
+}
+
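+/// Parse a 'label %bb' operand and resolve it to a BasicBlock, rejecting any
+/// value that is not a basic block.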
+bool LLParser::ParseTypeAndBasicBlock(BasicBlock *&BB, LocTy &Loc,
+                                      PerFunctionState &PFS) {
+  Value *V;
+  Loc = Lex.getLoc();
+  if (ParseTypeAndValue(V, PFS)) return true;
+  if (!isa<BasicBlock>(V))
+    return Error(Loc, "expected a basic block");
+  BB = cast<BasicBlock>(V);
+  return false;
+}
+
+/// FunctionHeader
+///   ::= OptionalLinkage OptionalVisibility OptionalCallingConv OptRetAttrs
+///       OptUnnamedAddr Type GlobalName '(' ArgList ')' OptFuncAttrs OptSection
+///       OptionalAlign OptGC OptionalPrefix OptionalPrologue OptPersonalityFn
+bool LLParser::ParseFunctionHeader(Function *&Fn, bool isDefine) {
+  // Parse the linkage.
+  LocTy LinkageLoc = Lex.getLoc();
+  unsigned Linkage;
+
+  unsigned Visibility;
+  unsigned DLLStorageClass;
+  AttrBuilder RetAttrs;
+  unsigned CC;
+  bool HasLinkage;
+  Type *RetType = nullptr;
+  LocTy RetTypeLoc = Lex.getLoc();
+  if (ParseOptionalLinkage(Linkage, HasLinkage, Visibility, DLLStorageClass) ||
+      ParseOptionalCallingConv(CC) || ParseOptionalReturnAttrs(RetAttrs) ||
+      ParseType(RetType, RetTypeLoc, true /*void allowed*/))
+    return true;
+
+  // Verify that the linkage is ok.
+  switch ((GlobalValue::LinkageTypes)Linkage) {
+  case GlobalValue::ExternalLinkage:
+    break; // always ok.
+  case GlobalValue::ExternalWeakLinkage:
+    if (isDefine)
+      return Error(LinkageLoc, "invalid linkage for function definition");
+    break;
+  case GlobalValue::PrivateLinkage:
+  case GlobalValue::InternalLinkage:
+  case GlobalValue::AvailableExternallyLinkage:
+  case GlobalValue::LinkOnceAnyLinkage:
+  case GlobalValue::LinkOnceODRLinkage:
+  case GlobalValue::WeakAnyLinkage:
+  case GlobalValue::WeakODRLinkage:
+    if (!isDefine)
+      return Error(LinkageLoc, "invalid linkage for function declaration");
+    break;
+  case GlobalValue::AppendingLinkage:
+  case GlobalValue::CommonLinkage:
+    return Error(LinkageLoc, "invalid function linkage type");
+  }
+
+  if (!isValidVisibilityForLinkage(Visibility, Linkage))
+    return Error(LinkageLoc,
+                 "symbol with local linkage must have default visibility");
+
+  if (!FunctionType::isValidReturnType(RetType))
+    return Error(RetTypeLoc, "invalid function return type");
+
+  LocTy NameLoc = Lex.getLoc();
+
+  std::string FunctionName;
+  if (Lex.getKind() == lltok::GlobalVar) {
+    FunctionName = Lex.getStrVal();
+  } else if (Lex.getKind() == lltok::GlobalID) {     // @42 is ok.
+    unsigned NameID = Lex.getUIntVal();
+
+    if (NameID != NumberedVals.size())
+      return TokError("function expected to be numbered '%" +
+                      Twine(NumberedVals.size()) + "'");
+  } else {
+    return TokError("expected function name");
+  }
+
+  Lex.Lex();
+
+  if (Lex.getKind() != lltok::lparen)
+    return TokError("expected '(' in function argument list");
+
+  SmallVector<ArgInfo, 8> ArgList;
+  bool isVarArg;
+  AttrBuilder FuncAttrs;
+  std::vector<unsigned> FwdRefAttrGrps;
+  LocTy BuiltinLoc;
+  std::string Section;
+  unsigned Alignment;
+  std::string GC;
+  GlobalValue::UnnamedAddr UnnamedAddr = GlobalValue::UnnamedAddr::None;
+  LocTy UnnamedAddrLoc;
+  Constant *Prefix = nullptr;
+  Constant *Prologue = nullptr;
+  Constant *PersonalityFn = nullptr;
+  Comdat *C;
+
+  if (ParseArgumentList(ArgList, isVarArg) ||
+      ParseOptionalUnnamedAddr(UnnamedAddr) ||
+      ParseFnAttributeValuePairs(FuncAttrs, FwdRefAttrGrps, false,
+                                 BuiltinLoc) ||
+      (EatIfPresent(lltok::kw_section) &&
+       ParseStringConstant(Section)) ||
+      parseOptionalComdat(FunctionName, C) ||
+      ParseOptionalAlignment(Alignment) ||
+      (EatIfPresent(lltok::kw_gc) &&
+       ParseStringConstant(GC)) ||
+      (EatIfPresent(lltok::kw_prefix) &&
+       ParseGlobalTypeAndValue(Prefix)) ||
+      (EatIfPresent(lltok::kw_prologue) &&
+       ParseGlobalTypeAndValue(Prologue)) ||
+      (EatIfPresent(lltok::kw_personality) &&
+       ParseGlobalTypeAndValue(PersonalityFn)))
+    return true;
+
+  if (FuncAttrs.contains(Attribute::Builtin))
+    return Error(BuiltinLoc, "'builtin' attribute not valid on function");
+
+  // If the alignment was parsed as an attribute, move to the alignment field.
+  if (FuncAttrs.hasAlignmentAttr()) {
+    Alignment = FuncAttrs.getAlignment();
+    FuncAttrs.removeAttribute(Attribute::Alignment);
+  }
+
+  // Okay, if we got here, the function is syntactically valid.  Convert types
+  // and do semantic checks.
+  std::vector<Type*> ParamTypeList;
+  SmallVector<AttributeSet, 8> Attrs;
+
+  if (RetAttrs.hasAttributes())
+    Attrs.push_back(AttributeSet::get(RetType->getContext(),
+                                      AttributeSet::ReturnIndex,
+                                      RetAttrs));
+
+  for (unsigned i = 0, e = ArgList.size(); i != e; ++i) {
+    ParamTypeList.push_back(ArgList[i].Ty);
+    if (ArgList[i].Attrs.hasAttributes(i + 1)) {
+      AttrBuilder B(ArgList[i].Attrs, i + 1);
+      Attrs.push_back(AttributeSet::get(RetType->getContext(), i + 1, B));
+    }
+  }
+
+  if (FuncAttrs.hasAttributes())
+    Attrs.push_back(AttributeSet::get(RetType->getContext(),
+                                      AttributeSet::FunctionIndex,
+                                      FuncAttrs));
+
+  AttributeSet PAL = AttributeSet::get(Context, Attrs);
+
+  if (PAL.hasAttribute(1, Attribute::StructRet) && !RetType->isVoidTy())
+    return Error(RetTypeLoc, "functions with 'sret' argument must return void");
+
+  FunctionType *FT =
+    FunctionType::get(RetType, ParamTypeList, isVarArg);
+  PointerType *PFT = PointerType::getUnqual(FT);
+
+  Fn = nullptr;
+  if (!FunctionName.empty()) {
+    // If this was a definition of a forward reference, remove the definition
+    // from the forward reference table and fill in the forward ref.
+    auto FRVI = ForwardRefVals.find(FunctionName);
+    if (FRVI != ForwardRefVals.end()) {
+      Fn = M->getFunction(FunctionName);
+      if (!Fn)
+        return Error(FRVI->second.second, "invalid forward reference to "
+                     "function as global value!");
+      if (Fn->getType() != PFT)
+        return Error(FRVI->second.second, "invalid forward reference to "
+                     "function '" + FunctionName + "' with wrong type!");
+
+      ForwardRefVals.erase(FRVI);
+    } else if ((Fn = M->getFunction(FunctionName))) {
+      // Reject redefinitions.
+      return Error(NameLoc, "invalid redefinition of function '" +
+                   FunctionName + "'");
+    } else if (M->getNamedValue(FunctionName)) {
+      return Error(NameLoc, "redefinition of function '@" + FunctionName + "'");
+    }
+
+  } else {
+    // If this is a definition of a forward referenced function, make sure the
+    // types agree.
+    auto I = ForwardRefValIDs.find(NumberedVals.size());
+    if (I != ForwardRefValIDs.end()) {
+      Fn = cast<Function>(I->second.first);
+      if (Fn->getType() != PFT)
+        return Error(NameLoc, "type of definition and forward reference of '@" +
+                     Twine(NumberedVals.size()) + "' disagree");
+      ForwardRefValIDs.erase(I);
+    }
+  }
+
+  if (!Fn)
+    Fn = Function::Create(FT, GlobalValue::ExternalLinkage, FunctionName, M);
+  else // Move the forward-reference to the correct spot in the module.
+    M->getFunctionList().splice(M->end(), M->getFunctionList(), Fn);
+
+  if (FunctionName.empty())
+    NumberedVals.push_back(Fn);
+
+  Fn->setLinkage((GlobalValue::LinkageTypes)Linkage);
+  Fn->setVisibility((GlobalValue::VisibilityTypes)Visibility);
+  Fn->setDLLStorageClass((GlobalValue::DLLStorageClassTypes)DLLStorageClass);
+  Fn->setCallingConv(CC);
+  Fn->setAttributes(PAL);
+  Fn->setUnnamedAddr(UnnamedAddr);
+  Fn->setAlignment(Alignment);
+  Fn->setSection(Section);
+  Fn->setComdat(C);
+  Fn->setPersonalityFn(PersonalityFn);
+  if (!GC.empty()) Fn->setGC(GC);
+  Fn->setPrefixData(Prefix);
+  Fn->setPrologueData(Prologue);
+  ForwardRefAttrGroups[Fn] = FwdRefAttrGrps;
+
+  // Add all of the arguments we parsed to the function.
+  Function::arg_iterator ArgIt = Fn->arg_begin();
+  for (unsigned i = 0, e = ArgList.size(); i != e; ++i, ++ArgIt) {
+    // If the argument has a name, insert it into the argument symbol table.
+    if (ArgList[i].Name.empty()) continue;
+
+    // Set the name, if it conflicted, it will be auto-renamed.
+    ArgIt->setName(ArgList[i].Name);
+
+    if (ArgIt->getName() != ArgList[i].Name)
+      return Error(ArgList[i].Loc, "redefinition of argument '%" +
+                   ArgList[i].Name + "'");
+  }
+
+  if (isDefine)
+    return false;
+
+  // Check the declaration has no block address forward references.
+  ValID ID;
+  if (FunctionName.empty()) {
+    ID.Kind = ValID::t_GlobalID;
+    ID.UIntVal = NumberedVals.size() - 1;
+  } else {
+    ID.Kind = ValID::t_GlobalName;
+    ID.StrVal = FunctionName;
+  }
+  auto Blocks = ForwardRefBlockAddresses.find(ID);
+  if (Blocks != ForwardRefBlockAddresses.end())
+    return Error(Blocks->first.Loc,
+                 "cannot take blockaddress inside a declaration");
+  return false;
+}
+
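+/// Resolve blockaddress constants that referred to this function before its
+/// body was parsed: each placeholder GlobalValue is replaced with a real
+/// BlockAddress once the referenced basic block is known.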
+bool LLParser::PerFunctionState::resolveForwardRefBlockAddresses() {
+  ValID ID;
+  if (FunctionNumber == -1) {
+    ID.Kind = ValID::t_GlobalName;
+    ID.StrVal = F.getName();
+  } else {
+    ID.Kind = ValID::t_GlobalID;
+    ID.UIntVal = FunctionNumber;
+  }
+
+  auto Blocks = P.ForwardRefBlockAddresses.find(ID);
+  if (Blocks == P.ForwardRefBlockAddresses.end())
+    return false;
+
+  for (const auto &I : Blocks->second) {
+    const ValID &BBID = I.first;
+    GlobalValue *GV = I.second;
+
+    assert((BBID.Kind == ValID::t_LocalID || BBID.Kind == ValID::t_LocalName) &&
+           "Expected local id or name");
+    BasicBlock *BB;
+    if (BBID.Kind == ValID::t_LocalName)
+      BB = GetBB(BBID.StrVal, BBID.Loc);
+    else
+      BB = GetBB(BBID.UIntVal, BBID.Loc);
+    if (!BB)
+      return P.Error(BBID.Loc, "referenced value is not a basic block");
+
+    GV->replaceAllUsesWith(BlockAddress::get(&F, BB));
+    GV->eraseFromParent();
+  }
+
+  P.ForwardRefBlockAddresses.erase(Blocks);
+  return false;
+}
+
+/// ParseFunctionBody
+///   ::= '{' BasicBlock+ UseListOrderDirective* '}'
+bool LLParser::ParseFunctionBody(Function &Fn) {
+  if (Lex.getKind() != lltok::lbrace)
+    return TokError("expected '{' in function body");
+  Lex.Lex();  // eat the {.
+
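+  // Unnamed functions are referenced by number; recover the ID that
+  // ParseFunctionHeader assigned so blockaddress forward references to this
+  // function can be resolved below.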
+  int FunctionNumber = -1;
+  if (!Fn.hasName()) FunctionNumber = NumberedVals.size()-1;
+
+  PerFunctionState PFS(*this, Fn, FunctionNumber);
+
+  // Resolve block addresses and allow basic blocks to be forward-declared
+  // within this function.
+  if (PFS.resolveForwardRefBlockAddresses())
+    return true;
+  SaveAndRestore<PerFunctionState *> ScopeExit(BlockAddressPFS, &PFS);
+
+  // We need at least one basic block.
+  if (Lex.getKind() == lltok::rbrace || Lex.getKind() == lltok::kw_uselistorder)
+    return TokError("function body requires at least one basic block");
+
+  while (Lex.getKind() != lltok::rbrace &&
+         Lex.getKind() != lltok::kw_uselistorder)
+    if (ParseBasicBlock(PFS)) return true;
+
+  while (Lex.getKind() != lltok::rbrace)
+    if (ParseUseListOrder(&PFS))
+      return true;
+
+  // Eat the }.
+  Lex.Lex();
+
+  // Verify function is ok.
+  return PFS.FinishFunction();
+}
+
+/// ParseBasicBlock
+///   ::= LabelStr? Instruction*
+bool LLParser::ParseBasicBlock(PerFunctionState &PFS) {
+  // If this basic block starts out with a name, remember it.
+  std::string Name;
+  LocTy NameLoc = Lex.getLoc();
+  if (Lex.getKind() == lltok::LabelStr) {
+    Name = Lex.getStrVal();
+    Lex.Lex();
+  }
+
+  BasicBlock *BB = PFS.DefineBB(Name, NameLoc);
+  if (!BB)
+    return Error(NameLoc,
+                 "unable to create block named '" + Name + "'");
+
+  std::string NameStr;
+
+  // Parse the instructions in this block until we get a terminator.
+  Instruction *Inst;
+  do {
+    // This instruction may have three possibilities for a name: a) none
+    // specified, b) name specified "%foo =", c) number specified: "%4 =".
+    LocTy NameLoc = Lex.getLoc();
+    int NameID = -1;
+    NameStr = "";
+
+    if (Lex.getKind() == lltok::LocalVarID) {
+      NameID = Lex.getUIntVal();
+      Lex.Lex();
+      if (ParseToken(lltok::equal, "expected '=' after instruction id"))
+        return true;
+    } else if (Lex.getKind() == lltok::LocalVar) {
+      NameStr = Lex.getStrVal();
+      Lex.Lex();
+      if (ParseToken(lltok::equal, "expected '=' after instruction name"))
+        return true;
+    }
+
+    switch (ParseInstruction(Inst, BB, PFS)) {
+    default: llvm_unreachable("Unknown ParseInstruction result!");
+    case InstError: return true;
+    case InstNormal:
+      BB->getInstList().push_back(Inst);
+
+      // With a normal result, we check to see if the instruction is followed by
+      // a comma and metadata.
+      if (EatIfPresent(lltok::comma))
+        if (ParseInstructionMetadata(*Inst))
+          return true;
+      break;
+    case InstExtraComma:
+      BB->getInstList().push_back(Inst);
+
+      // If the instruction parser ate an extra comma at the end of it, it
+      // *must* be followed by metadata.
+      if (ParseInstructionMetadata(*Inst))
+        return true;
+      break;
+    }
+
+    // Set the name on the instruction.
+    if (PFS.SetInstName(NameID, NameStr, NameLoc, Inst)) return true;
+  } while (!isa<TerminatorInst>(Inst));
+
+  return false;
+}
+
+//===----------------------------------------------------------------------===//
+// Instruction Parsing.
+//===----------------------------------------------------------------------===//
+
+/// ParseInstruction - Parse one of the many different instructions.
+///
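+/// Returns InstNormal on success, InstError on failure, or InstExtraComma when
+/// the parsed instruction consumed a trailing comma that must be followed by
+/// instruction metadata (see ParseBasicBlock).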
+int LLParser::ParseInstruction(Instruction *&Inst, BasicBlock *BB,
+                               PerFunctionState &PFS) {
+  lltok::Kind Token = Lex.getKind();
+  if (Token == lltok::Eof)
+    return TokError("found end of file when expecting more instructions");
+  LocTy Loc = Lex.getLoc();
+  unsigned KeywordVal = Lex.getUIntVal();
+  Lex.Lex();  // Eat the keyword.
+
+  switch (Token) {
+  default:                    return Error(Loc, "expected instruction opcode");
+  // Terminator Instructions.
+  case lltok::kw_unreachable: Inst = new UnreachableInst(Context); return false;
+  case lltok::kw_ret:         return ParseRet(Inst, BB, PFS);
+  case lltok::kw_br:          return ParseBr(Inst, PFS);
+  case lltok::kw_switch:      return ParseSwitch(Inst, PFS);
+  case lltok::kw_indirectbr:  return ParseIndirectBr(Inst, PFS);
+  case lltok::kw_invoke:      return ParseInvoke(Inst, PFS);
+  case lltok::kw_resume:      return ParseResume(Inst, PFS);
+  case lltok::kw_cleanupret:  return ParseCleanupRet(Inst, PFS);
+  case lltok::kw_catchret:    return ParseCatchRet(Inst, PFS);
+  case lltok::kw_catchswitch: return ParseCatchSwitch(Inst, PFS);
+  case lltok::kw_catchpad:    return ParseCatchPad(Inst, PFS);
+  case lltok::kw_cleanuppad:  return ParseCleanupPad(Inst, PFS);
+  // Binary Operators.
+  case lltok::kw_add:
+  case lltok::kw_sub:
+  case lltok::kw_mul:
+  case lltok::kw_shl: {
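+    // The wrap flags may appear in either order ('nuw nsw' or 'nsw nuw'), so
+    // check for 'nuw' again after the optional 'nsw'.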
+    bool NUW = EatIfPresent(lltok::kw_nuw);
+    bool NSW = EatIfPresent(lltok::kw_nsw);
+    if (!NUW) NUW = EatIfPresent(lltok::kw_nuw);
+
+    if (ParseArithmetic(Inst, PFS, KeywordVal, 1)) return true;
+
+    if (NUW) cast<BinaryOperator>(Inst)->setHasNoUnsignedWrap(true);
+    if (NSW) cast<BinaryOperator>(Inst)->setHasNoSignedWrap(true);
+    return false;
+  }
+  case lltok::kw_fadd:
+  case lltok::kw_fsub:
+  case lltok::kw_fmul:
+  case lltok::kw_fdiv:
+  case lltok::kw_frem: {
+    FastMathFlags FMF = EatFastMathFlagsIfPresent();
+    int Res = ParseArithmetic(Inst, PFS, KeywordVal, 2);
+    if (Res != 0)
+      return Res;
+    if (FMF.any())
+      Inst->setFastMathFlags(FMF);
+    return 0;
+  }
+
+  case lltok::kw_sdiv:
+  case lltok::kw_udiv:
+  case lltok::kw_lshr:
+  case lltok::kw_ashr: {
+    bool Exact = EatIfPresent(lltok::kw_exact);
+
+    if (ParseArithmetic(Inst, PFS, KeywordVal, 1)) return true;
+    if (Exact) cast<BinaryOperator>(Inst)->setIsExact(true);
+    return false;
+  }
+
+  case lltok::kw_urem:
+  case lltok::kw_srem:   return ParseArithmetic(Inst, PFS, KeywordVal, 1);
+  case lltok::kw_and:
+  case lltok::kw_or:
+  case lltok::kw_xor:    return ParseLogical(Inst, PFS, KeywordVal);
+  case lltok::kw_icmp:   return ParseCompare(Inst, PFS, KeywordVal);
+  case lltok::kw_fcmp: {
+    FastMathFlags FMF = EatFastMathFlagsIfPresent();
+    int Res = ParseCompare(Inst, PFS, KeywordVal);
+    if (Res != 0)
+      return Res;
+    if (FMF.any())
+      Inst->setFastMathFlags(FMF);
+    return 0;
+  }
+
+  // Casts.
+  case lltok::kw_trunc:
+  case lltok::kw_zext:
+  case lltok::kw_sext:
+  case lltok::kw_fptrunc:
+  case lltok::kw_fpext:
+  case lltok::kw_bitcast:
+  case lltok::kw_addrspacecast:
+  case lltok::kw_uitofp:
+  case lltok::kw_sitofp:
+  case lltok::kw_fptoui:
+  case lltok::kw_fptosi:
+  case lltok::kw_inttoptr:
+  case lltok::kw_ptrtoint:       return ParseCast(Inst, PFS, KeywordVal);
+  // Other.
+  case lltok::kw_select:         return ParseSelect(Inst, PFS);
+  case lltok::kw_va_arg:         return ParseVA_Arg(Inst, PFS);
+  case lltok::kw_extractelement: return ParseExtractElement(Inst, PFS);
+  case lltok::kw_insertelement:  return ParseInsertElement(Inst, PFS);
+  case lltok::kw_shufflevector:  return ParseShuffleVector(Inst, PFS);
+  case lltok::kw_phi:            return ParsePHI(Inst, PFS);
+  case lltok::kw_landingpad:     return ParseLandingPad(Inst, PFS);
+  // Call.
+  case lltok::kw_call:     return ParseCall(Inst, PFS, CallInst::TCK_None);
+  case lltok::kw_tail:     return ParseCall(Inst, PFS, CallInst::TCK_Tail);
+  case lltok::kw_musttail: return ParseCall(Inst, PFS, CallInst::TCK_MustTail);
+  case lltok::kw_notail:   return ParseCall(Inst, PFS, CallInst::TCK_NoTail);
+  // Memory.
+  case lltok::kw_alloca:         return ParseAlloc(Inst, PFS);
+  case lltok::kw_load:           return ParseLoad(Inst, PFS);
+  case lltok::kw_store:          return ParseStore(Inst, PFS);
+  case lltok::kw_cmpxchg:        return ParseCmpXchg(Inst, PFS);
+  case lltok::kw_atomicrmw:      return ParseAtomicRMW(Inst, PFS);
+  case lltok::kw_fence:          return ParseFence(Inst, PFS);
+  case lltok::kw_getelementptr: return ParseGetElementPtr(Inst, PFS);
+  case lltok::kw_extractvalue:  return ParseExtractValue(Inst, PFS);
+  case lltok::kw_insertvalue:   return ParseInsertValue(Inst, PFS);
+  }
+}
+
+/// ParseCmpPredicate - Parse an integer or fp predicate, based on Kind.
+bool LLParser::ParseCmpPredicate(unsigned &P, unsigned Opc) {
+  if (Opc == Instruction::FCmp) {
+    switch (Lex.getKind()) {
+    default: return TokError("expected fcmp predicate (e.g. 'oeq')");
+    case lltok::kw_oeq: P = CmpInst::FCMP_OEQ; break;
+    case lltok::kw_one: P = CmpInst::FCMP_ONE; break;
+    case lltok::kw_olt: P = CmpInst::FCMP_OLT; break;
+    case lltok::kw_ogt: P = CmpInst::FCMP_OGT; break;
+    case lltok::kw_ole: P = CmpInst::FCMP_OLE; break;
+    case lltok::kw_oge: P = CmpInst::FCMP_OGE; break;
+    case lltok::kw_ord: P = CmpInst::FCMP_ORD; break;
+    case lltok::kw_uno: P = CmpInst::FCMP_UNO; break;
+    case lltok::kw_ueq: P = CmpInst::FCMP_UEQ; break;
+    case lltok::kw_une: P = CmpInst::FCMP_UNE; break;
+    case lltok::kw_ult: P = CmpInst::FCMP_ULT; break;
+    case lltok::kw_ugt: P = CmpInst::FCMP_UGT; break;
+    case lltok::kw_ule: P = CmpInst::FCMP_ULE; break;
+    case lltok::kw_uge: P = CmpInst::FCMP_UGE; break;
+    case lltok::kw_true: P = CmpInst::FCMP_TRUE; break;
+    case lltok::kw_false: P = CmpInst::FCMP_FALSE; break;
+    }
+  } else {
+    switch (Lex.getKind()) {
+    default: return TokError("expected icmp predicate (e.g. 'eq')");
+    case lltok::kw_eq:  P = CmpInst::ICMP_EQ; break;
+    case lltok::kw_ne:  P = CmpInst::ICMP_NE; break;
+    case lltok::kw_slt: P = CmpInst::ICMP_SLT; break;
+    case lltok::kw_sgt: P = CmpInst::ICMP_SGT; break;
+    case lltok::kw_sle: P = CmpInst::ICMP_SLE; break;
+    case lltok::kw_sge: P = CmpInst::ICMP_SGE; break;
+    case lltok::kw_ult: P = CmpInst::ICMP_ULT; break;
+    case lltok::kw_ugt: P = CmpInst::ICMP_UGT; break;
+    case lltok::kw_ule: P = CmpInst::ICMP_ULE; break;
+    case lltok::kw_uge: P = CmpInst::ICMP_UGE; break;
+    }
+  }
+  Lex.Lex();
+  return false;
+}
+
+//===----------------------------------------------------------------------===//
+// Terminator Instructions.
+//===----------------------------------------------------------------------===//
+
+/// ParseRet - Parse a return instruction.
+///   ::= 'ret' void (',' !dbg, !1)*
+///   ::= 'ret' TypeAndValue (',' !dbg, !1)*
+bool LLParser::ParseRet(Instruction *&Inst, BasicBlock *BB,
+                        PerFunctionState &PFS) {
+  SMLoc TypeLoc = Lex.getLoc();
+  Type *Ty = nullptr;
+  if (ParseType(Ty, true /*void allowed*/)) return true;
+
+  Type *ResType = PFS.getFunction().getReturnType();
+
+  if (Ty->isVoidTy()) {
+    if (!ResType->isVoidTy())
+      return Error(TypeLoc, "value doesn't match function result type '" +
+                   getTypeString(ResType) + "'");
+
+    Inst = ReturnInst::Create(Context);
+    return false;
+  }
+
+  Value *RV;
+  if (ParseValue(Ty, RV, PFS)) return true;
+
+  if (ResType != RV->getType())
+    return Error(TypeLoc, "value doesn't match function result type '" +
+                 getTypeString(ResType) + "'");
+
+  Inst = ReturnInst::Create(Context, RV);
+  return false;
+}
+
+/// ParseBr
+///   ::= 'br' TypeAndValue
+///   ::= 'br' TypeAndValue ',' TypeAndValue ',' TypeAndValue
+bool LLParser::ParseBr(Instruction *&Inst, PerFunctionState &PFS) {
+  LocTy Loc, Loc2;
+  Value *Op0;
+  BasicBlock *Op1, *Op2;
+  if (ParseTypeAndValue(Op0, Loc, PFS)) return true;
+
+  if (BasicBlock *BB = dyn_cast<BasicBlock>(Op0)) {
+    Inst = BranchInst::Create(BB);
+    return false;
+  }
+
+  if (Op0->getType() != Type::getInt1Ty(Context))
+    return Error(Loc, "branch condition must have 'i1' type");
+
+  if (ParseToken(lltok::comma, "expected ',' after branch condition") ||
+      ParseTypeAndBasicBlock(Op1, Loc, PFS) ||
+      ParseToken(lltok::comma, "expected ',' after true destination") ||
+      ParseTypeAndBasicBlock(Op2, Loc2, PFS))
+    return true;
+
+  Inst = BranchInst::Create(Op1, Op2, Op0);
+  return false;
+}
+
+/// ParseSwitch
+///  Instruction
+///    ::= 'switch' TypeAndValue ',' TypeAndValue '[' JumpTable ']'
+///  JumpTable
+///    ::= (TypeAndValue ',' TypeAndValue)*
+bool LLParser::ParseSwitch(Instruction *&Inst, PerFunctionState &PFS) {
+  LocTy CondLoc, BBLoc;
+  Value *Cond;
+  BasicBlock *DefaultBB;
+  if (ParseTypeAndValue(Cond, CondLoc, PFS) ||
+      ParseToken(lltok::comma, "expected ',' after switch condition") ||
+      ParseTypeAndBasicBlock(DefaultBB, BBLoc, PFS) ||
+      ParseToken(lltok::lsquare, "expected '[' with switch table"))
+    return true;
+
+  if (!Cond->getType()->isIntegerTy())
+    return Error(CondLoc, "switch condition must have integer type");
+
+  // Parse the jump table pairs.
+  SmallPtrSet<Value*, 32> SeenCases;
+  SmallVector<std::pair<ConstantInt*, BasicBlock*>, 32> Table;
+  while (Lex.getKind() != lltok::rsquare) {
+    Value *Constant;
+    BasicBlock *DestBB;
+
+    if (ParseTypeAndValue(Constant, CondLoc, PFS) ||
+        ParseToken(lltok::comma, "expected ',' after case value") ||
+        ParseTypeAndBasicBlock(DestBB, PFS))
+      return true;
+
+    if (!SeenCases.insert(Constant).second)
+      return Error(CondLoc, "duplicate case value in switch");
+    if (!isa<ConstantInt>(Constant))
+      return Error(CondLoc, "case value is not a constant integer");
+
+    Table.push_back(std::make_pair(cast<ConstantInt>(Constant), DestBB));
+  }
+
+  Lex.Lex();  // Eat the ']'.
+
+  SwitchInst *SI = SwitchInst::Create(Cond, DefaultBB, Table.size());
+  for (unsigned i = 0, e = Table.size(); i != e; ++i)
+    SI->addCase(Table[i].first, Table[i].second);
+  Inst = SI;
+  return false;
+}
+
+/// ParseIndirectBr
+///  Instruction
+///    ::= 'indirectbr' TypeAndValue ',' '[' LabelList ']'
+bool LLParser::ParseIndirectBr(Instruction *&Inst, PerFunctionState &PFS) {
+  LocTy AddrLoc;
+  Value *Address;
+  if (ParseTypeAndValue(Address, AddrLoc, PFS) ||
+      ParseToken(lltok::comma, "expected ',' after indirectbr address") ||
+      ParseToken(lltok::lsquare, "expected '[' with indirectbr"))
+    return true;
+
+  if (!Address->getType()->isPointerTy())
+    return Error(AddrLoc, "indirectbr address must have pointer type");
+
+  // Parse the destination list.
+  SmallVector<BasicBlock*, 16> DestList;
+
+  if (Lex.getKind() != lltok::rsquare) {
+    BasicBlock *DestBB;
+    if (ParseTypeAndBasicBlock(DestBB, PFS))
+      return true;
+    DestList.push_back(DestBB);
+
+    while (EatIfPresent(lltok::comma)) {
+      if (ParseTypeAndBasicBlock(DestBB, PFS))
+        return true;
+      DestList.push_back(DestBB);
+    }
+  }
+
+  if (ParseToken(lltok::rsquare, "expected ']' at end of block list"))
+    return true;
+
+  IndirectBrInst *IBI = IndirectBrInst::Create(Address, DestList.size());
+  for (unsigned i = 0, e = DestList.size(); i != e; ++i)
+    IBI->addDestination(DestList[i]);
+  Inst = IBI;
+  return false;
+}
+
+/// ParseInvoke
+///   ::= 'invoke' OptionalCallingConv OptionalAttrs Type Value ParamList
+///       OptionalAttrs 'to' TypeAndValue 'unwind' TypeAndValue
+bool LLParser::ParseInvoke(Instruction *&Inst, PerFunctionState &PFS) {
+  LocTy CallLoc = Lex.getLoc();
+  AttrBuilder RetAttrs, FnAttrs;
+  std::vector<unsigned> FwdRefAttrGrps;
+  LocTy NoBuiltinLoc;
+  unsigned CC;
+  Type *RetType = nullptr;
+  LocTy RetTypeLoc;
+  ValID CalleeID;
+  SmallVector<ParamInfo, 16> ArgList;
+  SmallVector<OperandBundleDef, 2> BundleList;
+
+  BasicBlock *NormalBB, *UnwindBB;
+  if (ParseOptionalCallingConv(CC) || ParseOptionalReturnAttrs(RetAttrs) ||
+      ParseType(RetType, RetTypeLoc, true /*void allowed*/) ||
+      ParseValID(CalleeID) || ParseParameterList(ArgList, PFS) ||
+      ParseFnAttributeValuePairs(FnAttrs, FwdRefAttrGrps, false,
+                                 NoBuiltinLoc) ||
+      ParseOptionalOperandBundles(BundleList, PFS) ||
+      ParseToken(lltok::kw_to, "expected 'to' in invoke") ||
+      ParseTypeAndBasicBlock(NormalBB, PFS) ||
+      ParseToken(lltok::kw_unwind, "expected 'unwind' in invoke") ||
+      ParseTypeAndBasicBlock(UnwindBB, PFS))
+    return true;
+
+  // If RetType is a non-function pointer type, then this is the short syntax
+  // for the call, which means that RetType is just the return type.  Infer the
+  // rest of the function argument types from the arguments that are present.
+  FunctionType *Ty = dyn_cast<FunctionType>(RetType);
+  if (!Ty) {
+    // Pull out the types of all of the arguments...
+    std::vector<Type*> ParamTypes;
+    for (unsigned i = 0, e = ArgList.size(); i != e; ++i)
+      ParamTypes.push_back(ArgList[i].V->getType());
+
+    if (!FunctionType::isValidReturnType(RetType))
+      return Error(RetTypeLoc, "Invalid result type for LLVM function");
+
+    Ty = FunctionType::get(RetType, ParamTypes, false);
+  }
+
+  CalleeID.FTy = Ty;
+
+  // Look up the callee.
+  Value *Callee;
+  if (ConvertValIDToValue(PointerType::getUnqual(Ty), CalleeID, Callee, &PFS))
+    return true;
+
+  // Set up the Attribute for the function.
+  SmallVector<AttributeSet, 8> Attrs;
+  if (RetAttrs.hasAttributes())
+    Attrs.push_back(AttributeSet::get(RetType->getContext(),
+                                      AttributeSet::ReturnIndex,
+                                      RetAttrs));
+
+  SmallVector<Value*, 8> Args;
+
+  // Loop through FunctionType's arguments and ensure they are specified
+  // correctly.  Also, gather any parameter attributes.
+  FunctionType::param_iterator I = Ty->param_begin();
+  FunctionType::param_iterator E = Ty->param_end();
+  for (unsigned i = 0, e = ArgList.size(); i != e; ++i) {
+    Type *ExpectedTy = nullptr;
+    if (I != E) {
+      ExpectedTy = *I++;
+    } else if (!Ty->isVarArg()) {
+      return Error(ArgList[i].Loc, "too many arguments specified");
+    }
+
+    if (ExpectedTy && ExpectedTy != ArgList[i].V->getType())
+      return Error(ArgList[i].Loc, "argument is not of expected type '" +
+                   getTypeString(ExpectedTy) + "'");
+    Args.push_back(ArgList[i].V);
+    if (ArgList[i].Attrs.hasAttributes(i + 1)) {
+      AttrBuilder B(ArgList[i].Attrs, i + 1);
+      Attrs.push_back(AttributeSet::get(RetType->getContext(), i + 1, B));
+    }
+  }
+
+  if (I != E)
+    return Error(CallLoc, "not enough parameters specified for call");
+
+  if (FnAttrs.hasAttributes()) {
+    if (FnAttrs.hasAlignmentAttr())
+      return Error(CallLoc, "invoke instructions may not have an alignment");
+
+    Attrs.push_back(AttributeSet::get(RetType->getContext(),
+                                      AttributeSet::FunctionIndex,
+                                      FnAttrs));
+  }
+
+  // Finish off the Attributes and check them.
+  AttributeSet PAL = AttributeSet::get(Context, Attrs);
+
+  InvokeInst *II =
+      InvokeInst::Create(Ty, Callee, NormalBB, UnwindBB, Args, BundleList);
+  II->setCallingConv(CC);
+  II->setAttributes(PAL);
+  ForwardRefAttrGroups[II] = FwdRefAttrGrps;
+  Inst = II;
+  return false;
+}
+
+/// ParseResume
+///   ::= 'resume' TypeAndValue
+bool LLParser::ParseResume(Instruction *&Inst, PerFunctionState &PFS) {
+  Value *Exn; LocTy ExnLoc;
+  if (ParseTypeAndValue(Exn, ExnLoc, PFS))
+    return true;
+
+  ResumeInst *RI = ResumeInst::Create(Exn);
+  Inst = RI;
+  return false;
+}
+
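+/// ParseExceptionArgs
+///   ::= '[' (TypeAndValue (',' TypeAndValue)*)? ']'
+/// Parses the bracketed argument list of a catchpad/cleanuppad; arguments with
+/// metadata type are parsed as metadata values.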
+bool LLParser::ParseExceptionArgs(SmallVectorImpl<Value *> &Args,
+                                  PerFunctionState &PFS) {
+  if (ParseToken(lltok::lsquare, "expected '[' in catchpad/cleanuppad"))
+    return true;
+
+  while (Lex.getKind() != lltok::rsquare) {
+    // If this isn't the first argument, we need a comma.
+    if (!Args.empty() &&
+        ParseToken(lltok::comma, "expected ',' in argument list"))
+      return true;
+
+    // Parse the argument.
+    LocTy ArgLoc;
+    Type *ArgTy = nullptr;
+    if (ParseType(ArgTy, ArgLoc))
+      return true;
+
+    Value *V;
+    if (ArgTy->isMetadataTy()) {
+      if (ParseMetadataAsValue(V, PFS))
+        return true;
+    } else {
+      if (ParseValue(ArgTy, V, PFS))
+        return true;
+    }
+    Args.push_back(V);
+  }
+
+  Lex.Lex();  // Lex the ']'.
+  return false;
+}
+
+/// ParseCleanupRet
+///   ::= 'cleanupret' from Value unwind ('to' 'caller' | TypeAndValue)
+bool LLParser::ParseCleanupRet(Instruction *&Inst, PerFunctionState &PFS) {
+  Value *CleanupPad = nullptr;
+
+  if (ParseToken(lltok::kw_from, "expected 'from' after cleanupret"))
+    return true;
+
+  if (ParseValue(Type::getTokenTy(Context), CleanupPad, PFS))
+    return true;
+
+  if (ParseToken(lltok::kw_unwind, "expected 'unwind' in cleanupret"))
+    return true;
+
+  BasicBlock *UnwindBB = nullptr;
+  if (Lex.getKind() == lltok::kw_to) {
+    Lex.Lex();
+    if (ParseToken(lltok::kw_caller, "expected 'caller' in cleanupret"))
+      return true;
+  } else {
+    if (ParseTypeAndBasicBlock(UnwindBB, PFS)) {
+      return true;
+    }
+  }
+
+  Inst = CleanupReturnInst::Create(CleanupPad, UnwindBB);
+  return false;
+}
+
+/// ParseCatchRet
+///   ::= 'catchret' from Parent Value 'to' TypeAndValue
+bool LLParser::ParseCatchRet(Instruction *&Inst, PerFunctionState &PFS) {
+  Value *CatchPad = nullptr;
+
+  if (ParseToken(lltok::kw_from, "expected 'from' after catchret"))
+    return true;
+
+  if (ParseValue(Type::getTokenTy(Context), CatchPad, PFS))
+    return true;
+
+  BasicBlock *BB;
+  if (ParseToken(lltok::kw_to, "expected 'to' in catchret") ||
+      ParseTypeAndBasicBlock(BB, PFS))
+      return true;
+
+  Inst = CatchReturnInst::Create(CatchPad, BB);
+  return false;
+}
+
+/// ParseCatchSwitch
+///   ::= 'catchswitch' within Parent '[' TypeAndValue (',' TypeAndValue)* ']'
+///       'unwind' ('to' 'caller' | TypeAndValue)
+bool LLParser::ParseCatchSwitch(Instruction *&Inst, PerFunctionState &PFS) {
+  Value *ParentPad;
+  LocTy BBLoc;
+
+  if (ParseToken(lltok::kw_within, "expected 'within' after catchswitch"))
+    return true;
+
+  if (Lex.getKind() != lltok::kw_none && Lex.getKind() != lltok::LocalVar &&
+      Lex.getKind() != lltok::LocalVarID)
+    return TokError("expected scope value for catchswitch");
+
+  if (ParseValue(Type::getTokenTy(Context), ParentPad, PFS))
+    return true;
+
+  if (ParseToken(lltok::lsquare, "expected '[' with catchswitch labels"))
+    return true;
+
+  SmallVector<BasicBlock *, 32> Table;
+  do {
+    BasicBlock *DestBB;
+    if (ParseTypeAndBasicBlock(DestBB, PFS))
+      return true;
+    Table.push_back(DestBB);
+  } while (EatIfPresent(lltok::comma));
+
+  if (ParseToken(lltok::rsquare, "expected ']' after catchswitch labels"))
+    return true;
+
+  if (ParseToken(lltok::kw_unwind,
+                 "expected 'unwind' after catchswitch scope"))
+    return true;
+
+  BasicBlock *UnwindBB = nullptr;
+  if (EatIfPresent(lltok::kw_to)) {
+    if (ParseToken(lltok::kw_caller, "expected 'caller' in catchswitch"))
+      return true;
+  } else {
+    if (ParseTypeAndBasicBlock(UnwindBB, PFS))
+      return true;
+  }
+
+  auto *CatchSwitch =
+      CatchSwitchInst::Create(ParentPad, UnwindBB, Table.size());
+  for (BasicBlock *DestBB : Table)
+    CatchSwitch->addHandler(DestBB);
+  Inst = CatchSwitch;
+  return false;
+}
+
+/// ParseCatchPad
+///   ::= 'catchpad' within Parent ParamList
+bool LLParser::ParseCatchPad(Instruction *&Inst, PerFunctionState &PFS) {
+  Value *CatchSwitch = nullptr;
+
+  if (ParseToken(lltok::kw_within, "expected 'within' after catchpad"))
+    return true;
+
+  if (Lex.getKind() != lltok::LocalVar && Lex.getKind() != lltok::LocalVarID)
+    return TokError("expected scope value for catchpad");
+
+  if (ParseValue(Type::getTokenTy(Context), CatchSwitch, PFS))
+    return true;
+
+  SmallVector<Value *, 8> Args;
+  if (ParseExceptionArgs(Args, PFS))
+    return true;
+
+  Inst = CatchPadInst::Create(CatchSwitch, Args);
+  return false;
+}
+
+/// ParseCleanupPad
+///   ::= 'cleanuppad' within Parent ParamList
+bool LLParser::ParseCleanupPad(Instruction *&Inst, PerFunctionState &PFS) {
+  Value *ParentPad = nullptr;
+
+  if (ParseToken(lltok::kw_within, "expected 'within' after cleanuppad"))
+    return true;
+
+  if (Lex.getKind() != lltok::kw_none && Lex.getKind() != lltok::LocalVar &&
+      Lex.getKind() != lltok::LocalVarID)
+    return TokError("expected scope value for cleanuppad");
+
+  if (ParseValue(Type::getTokenTy(Context), ParentPad, PFS))
+    return true;
+
+  SmallVector<Value *, 8> Args;
+  if (ParseExceptionArgs(Args, PFS))
+    return true;
+
+  Inst = CleanupPadInst::Create(ParentPad, Args);
+  return false;
+}
+
+//===----------------------------------------------------------------------===//
+// Binary Operators.
+//===----------------------------------------------------------------------===//
+
+/// ParseArithmetic
+///  ::= ArithmeticOps TypeAndValue ',' Value
+///
+/// If OperandType is 0, then any FP or integer operand is allowed.  If it is 1,
+/// then any integer operand is allowed; if it is 2, any FP operand is allowed.
+bool LLParser::ParseArithmetic(Instruction *&Inst, PerFunctionState &PFS,
+                               unsigned Opc, unsigned OperandType) {
+  LocTy Loc; Value *LHS, *RHS;
+  if (ParseTypeAndValue(LHS, Loc, PFS) ||
+      ParseToken(lltok::comma, "expected ',' in arithmetic operation") ||
+      ParseValue(LHS->getType(), RHS, PFS))
+    return true;
+
+  bool Valid;
+  switch (OperandType) {
+  default: llvm_unreachable("Unknown operand type!");
+  case 0: // int or FP.
+    Valid = LHS->getType()->isIntOrIntVectorTy() ||
+            LHS->getType()->isFPOrFPVectorTy();
+    break;
+  case 1: Valid = LHS->getType()->isIntOrIntVectorTy(); break;
+  case 2: Valid = LHS->getType()->isFPOrFPVectorTy(); break;
+  }
+
+  if (!Valid)
+    return Error(Loc, "invalid operand type for instruction");
+
+  Inst = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS);
+  return false;
+}
+
+/// ParseLogical
+///  ::= ArithmeticOps TypeAndValue ',' Value
+bool LLParser::ParseLogical(Instruction *&Inst, PerFunctionState &PFS,
+                            unsigned Opc) {
+  LocTy Loc; Value *LHS, *RHS;
+  if (ParseTypeAndValue(LHS, Loc, PFS) ||
+      ParseToken(lltok::comma, "expected ',' in logical operation") ||
+      ParseValue(LHS->getType(), RHS, PFS))
+    return true;
+
+  if (!LHS->getType()->isIntOrIntVectorTy())
+    return Error(Loc,"instruction requires integer or integer vector operands");
+
+  Inst = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS);
+  return false;
+}
+
+/// ParseCompare
+///  ::= 'icmp' IPredicates TypeAndValue ',' Value
+///  ::= 'fcmp' FPredicates TypeAndValue ',' Value
+bool LLParser::ParseCompare(Instruction *&Inst, PerFunctionState &PFS,
+                            unsigned Opc) {
+  // Parse the integer/fp comparison predicate.
+  LocTy Loc;
+  unsigned Pred;
+  Value *LHS, *RHS;
+  if (ParseCmpPredicate(Pred, Opc) ||
+      ParseTypeAndValue(LHS, Loc, PFS) ||
+      ParseToken(lltok::comma, "expected ',' after compare value") ||
+      ParseValue(LHS->getType(), RHS, PFS))
+    return true;
+
+  if (Opc == Instruction::FCmp) {
+    if (!LHS->getType()->isFPOrFPVectorTy())
+      return Error(Loc, "fcmp requires floating point operands");
+    Inst = new FCmpInst(CmpInst::Predicate(Pred), LHS, RHS);
+  } else {
+    assert(Opc == Instruction::ICmp && "Unknown opcode for CmpInst!");
+    if (!LHS->getType()->isIntOrIntVectorTy() &&
+        !LHS->getType()->getScalarType()->isPointerTy())
+      return Error(Loc, "icmp requires integer operands");
+    Inst = new ICmpInst(CmpInst::Predicate(Pred), LHS, RHS);
+  }
+  return false;
+}
+
+//===----------------------------------------------------------------------===//
+// Other Instructions.
+//===----------------------------------------------------------------------===//
+
+
+/// ParseCast
+///   ::= CastOpc TypeAndValue 'to' Type
+bool LLParser::ParseCast(Instruction *&Inst, PerFunctionState &PFS,
+                         unsigned Opc) {
+  LocTy Loc;
+  Value *Op;
+  Type *DestTy = nullptr;
+  if (ParseTypeAndValue(Op, Loc, PFS) ||
+      ParseToken(lltok::kw_to, "expected 'to' after cast value") ||
+      ParseType(DestTy))
+    return true;
+
+  if (!CastInst::castIsValid((Instruction::CastOps)Opc, Op, DestTy)) {
+    CastInst::castIsValid((Instruction::CastOps)Opc, Op, DestTy);
+    return Error(Loc, "invalid cast opcode for cast from '" +
+                 getTypeString(Op->getType()) + "' to '" +
+                 getTypeString(DestTy) + "'");
+  }
+  Inst = CastInst::Create((Instruction::CastOps)Opc, Op, DestTy);
+  return false;
+}
+
+/// ParseSelect
+///   ::= 'select' TypeAndValue ',' TypeAndValue ',' TypeAndValue
+bool LLParser::ParseSelect(Instruction *&Inst, PerFunctionState &PFS) {
+  LocTy Loc;
+  Value *Op0, *Op1, *Op2;
+  if (ParseTypeAndValue(Op0, Loc, PFS) ||
+      ParseToken(lltok::comma, "expected ',' after select condition") ||
+      ParseTypeAndValue(Op1, PFS) ||
+      ParseToken(lltok::comma, "expected ',' after select value") ||
+      ParseTypeAndValue(Op2, PFS))
+    return true;
+
+  if (const char *Reason = SelectInst::areInvalidOperands(Op0, Op1, Op2))
+    return Error(Loc, Reason);
+
+  Inst = SelectInst::Create(Op0, Op1, Op2);
+  return false;
+}
+
+/// ParseVA_Arg
+///   ::= 'va_arg' TypeAndValue ',' Type
+bool LLParser::ParseVA_Arg(Instruction *&Inst, PerFunctionState &PFS) {
+  Value *Op;
+  Type *EltTy = nullptr;
+  LocTy TypeLoc;
+  if (ParseTypeAndValue(Op, PFS) ||
+      ParseToken(lltok::comma, "expected ',' after vaarg operand") ||
+      ParseType(EltTy, TypeLoc))
+    return true;
+
+  if (!EltTy->isFirstClassType())
+    return Error(TypeLoc, "va_arg requires operand with first class type");
+
+  Inst = new VAArgInst(Op, EltTy);
+  return false;
+}
+
+/// ParseExtractElement
+///   ::= 'extractelement' TypeAndValue ',' TypeAndValue
+bool LLParser::ParseExtractElement(Instruction *&Inst, PerFunctionState &PFS) {
+  LocTy Loc;
+  Value *Op0, *Op1;
+  if (ParseTypeAndValue(Op0, Loc, PFS) ||
+      ParseToken(lltok::comma, "expected ',' after extract value") ||
+      ParseTypeAndValue(Op1, PFS))
+    return true;
+
+  if (!ExtractElementInst::isValidOperands(Op0, Op1))
+    return Error(Loc, "invalid extractelement operands");
+
+  Inst = ExtractElementInst::Create(Op0, Op1);
+  return false;
+}
+
+/// ParseInsertElement
+///   ::= 'insertelement' TypeAndValue ',' TypeAndValue ',' TypeAndValue
+bool LLParser::ParseInsertElement(Instruction *&Inst, PerFunctionState &PFS) {
+  LocTy Loc;
+  Value *Op0, *Op1, *Op2;
+  if (ParseTypeAndValue(Op0, Loc, PFS) ||
+      ParseToken(lltok::comma, "expected ',' after insertelement value") ||
+      ParseTypeAndValue(Op1, PFS) ||
+      ParseToken(lltok::comma, "expected ',' after insertelement value") ||
+      ParseTypeAndValue(Op2, PFS))
+    return true;
+
+  if (!InsertElementInst::isValidOperands(Op0, Op1, Op2))
+    return Error(Loc, "invalid insertelement operands");
+
+  Inst = InsertElementInst::Create(Op0, Op1, Op2);
+  return false;
+}
+
+/// ParseShuffleVector
+///   ::= 'shufflevector' TypeAndValue ',' TypeAndValue ',' TypeAndValue
+bool LLParser::ParseShuffleVector(Instruction *&Inst, PerFunctionState &PFS) {
+  LocTy Loc;
+  Value *Op0, *Op1, *Op2;
+  if (ParseTypeAndValue(Op0, Loc, PFS) ||
+      ParseToken(lltok::comma, "expected ',' after shuffle mask") ||
+      ParseTypeAndValue(Op1, PFS) ||
+      ParseToken(lltok::comma, "expected ',' after shuffle value") ||
+      ParseTypeAndValue(Op2, PFS))
+    return true;
+
+  if (!ShuffleVectorInst::isValidOperands(Op0, Op1, Op2))
+    return Error(Loc, "invalid shufflevector operands");
+
+  Inst = new ShuffleVectorInst(Op0, Op1, Op2);
+  return false;
+}
+
+/// ParsePHI
+///   ::= 'phi' Type '[' Value ',' Value ']' (',' '[' Value ',' Value ']')*
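+/// Returns InstNormal, InstError, or InstExtraComma if a trailing comma
+/// (which must be followed by instruction metadata) was consumed.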
+int LLParser::ParsePHI(Instruction *&Inst, PerFunctionState &PFS) {
+  Type *Ty = nullptr;  LocTy TypeLoc;
+  Value *Op0, *Op1;
+
+  if (ParseType(Ty, TypeLoc) ||
+      ParseToken(lltok::lsquare, "expected '[' in phi value list") ||
+      ParseValue(Ty, Op0, PFS) ||
+      ParseToken(lltok::comma, "expected ',' after insertelement value") ||
+      ParseValue(Type::getLabelTy(Context), Op1, PFS) ||
+      ParseToken(lltok::rsquare, "expected ']' in phi value list"))
+    return true;
+
+  bool AteExtraComma = false;
+  SmallVector<std::pair<Value*, BasicBlock*>, 16> PHIVals;
+
+  while (true) {
+    PHIVals.push_back(std::make_pair(Op0, cast<BasicBlock>(Op1)));
+
+    if (!EatIfPresent(lltok::comma))
+      break;
+
+    if (Lex.getKind() == lltok::MetadataVar) {
+      AteExtraComma = true;
+      break;
+    }
+
+    if (ParseToken(lltok::lsquare, "expected '[' in phi value list") ||
+        ParseValue(Ty, Op0, PFS) ||
+        ParseToken(lltok::comma, "expected ',' after insertelement value") ||
+        ParseValue(Type::getLabelTy(Context), Op1, PFS) ||
+        ParseToken(lltok::rsquare, "expected ']' in phi value list"))
+      return true;
+  }
+
+  if (!Ty->isFirstClassType())
+    return Error(TypeLoc, "phi node must have first class type");
+
+  PHINode *PN = PHINode::Create(Ty, PHIVals.size());
+  for (unsigned i = 0, e = PHIVals.size(); i != e; ++i)
+    PN->addIncoming(PHIVals[i].first, PHIVals[i].second);
+  Inst = PN;
+  return AteExtraComma ? InstExtraComma : InstNormal;
+}
+
+/// ParseLandingPad
+///   ::= 'landingpad' Type 'personality' TypeAndValue 'cleanup'? Clause+
+/// Clause
+///   ::= 'catch' TypeAndValue
+///   ::= 'filter' TypeAndValue ( ',' TypeAndValue )*
+bool LLParser::ParseLandingPad(Instruction *&Inst, PerFunctionState &PFS) {
+  Type *Ty = nullptr; LocTy TyLoc;
+
+  if (ParseType(Ty, TyLoc))
+    return true;
+
+  std::unique_ptr<LandingPadInst> LP(LandingPadInst::Create(Ty, 0));
+  LP->setCleanup(EatIfPresent(lltok::kw_cleanup));
+
+  while (Lex.getKind() == lltok::kw_catch || Lex.getKind() == lltok::kw_filter){
+    LandingPadInst::ClauseType CT;
+    if (EatIfPresent(lltok::kw_catch))
+      CT = LandingPadInst::Catch;
+    else if (EatIfPresent(lltok::kw_filter))
+      CT = LandingPadInst::Filter;
+    else
+      return TokError("expected 'catch' or 'filter' clause type");
+
+    Value *V;
+    LocTy VLoc;
+    if (ParseTypeAndValue(V, VLoc, PFS))
+      return true;
+
+    // A 'catch' type expects a non-array constant. A filter clause expects an
+    // array constant.
+    if (CT == LandingPadInst::Catch) {
+      if (isa<ArrayType>(V->getType()))
+        Error(VLoc, "'catch' clause has an invalid type");
+    } else {
+      if (!isa<ArrayType>(V->getType()))
+        Error(VLoc, "'filter' clause has an invalid type");
+    }
+
+    Constant *CV = dyn_cast<Constant>(V);
+    if (!CV)
+      return Error(VLoc, "clause argument must be a constant");
+    LP->addClause(CV);
+  }
+
+  Inst = LP.release();
+  return false;
+}
+
+/// ParseCall
+///   ::= 'call' OptionalFastMathFlags OptionalCallingConv
+///           OptionalAttrs Type Value ParameterList OptionalAttrs
+///   ::= 'tail' 'call' OptionalFastMathFlags OptionalCallingConv
+///           OptionalAttrs Type Value ParameterList OptionalAttrs
+///   ::= 'musttail' 'call' OptionalFastMathFlags OptionalCallingConv
+///           OptionalAttrs Type Value ParameterList OptionalAttrs
+///   ::= 'notail' 'call'  OptionalFastMathFlags OptionalCallingConv
+///           OptionalAttrs Type Value ParameterList OptionalAttrs
+bool LLParser::ParseCall(Instruction *&Inst, PerFunctionState &PFS,
+                         CallInst::TailCallKind TCK) {
+  AttrBuilder RetAttrs, FnAttrs;
+  std::vector<unsigned> FwdRefAttrGrps;
+  LocTy BuiltinLoc;
+  unsigned CC;
+  Type *RetType = nullptr;
+  LocTy RetTypeLoc;
+  ValID CalleeID;
+  SmallVector<ParamInfo, 16> ArgList;
+  SmallVector<OperandBundleDef, 2> BundleList;
+  LocTy CallLoc = Lex.getLoc();
+
+  if (TCK != CallInst::TCK_None &&
+      ParseToken(lltok::kw_call,
+                 "expected 'tail call', 'musttail call', or 'notail call'"))
+    return true;
+
+  FastMathFlags FMF = EatFastMathFlagsIfPresent();
+
+  if (ParseOptionalCallingConv(CC) || ParseOptionalReturnAttrs(RetAttrs) ||
+      ParseType(RetType, RetTypeLoc, true /*void allowed*/) ||
+      ParseValID(CalleeID) ||
+      ParseParameterList(ArgList, PFS, TCK == CallInst::TCK_MustTail,
+                         PFS.getFunction().isVarArg()) ||
+      ParseFnAttributeValuePairs(FnAttrs, FwdRefAttrGrps, false, BuiltinLoc) ||
+      ParseOptionalOperandBundles(BundleList, PFS))
+    return true;
+
+  if (FMF.any() && !RetType->isFPOrFPVectorTy())
+    return Error(CallLoc, "fast-math-flags specified for call without "
+                          "floating-point scalar or vector return type");
+
+  // If RetType is a non-function pointer type, then this is the short syntax
+  // for the call, which means that RetType is just the return type.  Infer the
+  // rest of the function argument types from the arguments that are present.
+  FunctionType *Ty = dyn_cast<FunctionType>(RetType);
+  if (!Ty) {
+    // Pull out the types of all of the arguments...
+    std::vector<Type*> ParamTypes;
+    for (unsigned i = 0, e = ArgList.size(); i != e; ++i)
+      ParamTypes.push_back(ArgList[i].V->getType());
+
+    if (!FunctionType::isValidReturnType(RetType))
+      return Error(RetTypeLoc, "Invalid result type for LLVM function");
+
+    Ty = FunctionType::get(RetType, ParamTypes, false);
+  }
+
+  CalleeID.FTy = Ty;
+
+  // Look up the callee.
+  Value *Callee;
+  if (ConvertValIDToValue(PointerType::getUnqual(Ty), CalleeID, Callee, &PFS))
+    return true;
+
+  // Set up the Attribute for the function.
+  SmallVector<AttributeSet, 8> Attrs;
+  if (RetAttrs.hasAttributes())
+    Attrs.push_back(AttributeSet::get(RetType->getContext(),
+                                      AttributeSet::ReturnIndex,
+                                      RetAttrs));
+
+  SmallVector<Value*, 8> Args;
+
+  // Loop through FunctionType's arguments and ensure they are specified
+  // correctly.  Also, gather any parameter attributes.
+  FunctionType::param_iterator I = Ty->param_begin();
+  FunctionType::param_iterator E = Ty->param_end();
+  for (unsigned i = 0, e = ArgList.size(); i != e; ++i) {
+    Type *ExpectedTy = nullptr;
+    if (I != E) {
+      ExpectedTy = *I++;
+    } else if (!Ty->isVarArg()) {
+      return Error(ArgList[i].Loc, "too many arguments specified");
+    }
+
+    if (ExpectedTy && ExpectedTy != ArgList[i].V->getType())
+      return Error(ArgList[i].Loc, "argument is not of expected type '" +
+                   getTypeString(ExpectedTy) + "'");
+    Args.push_back(ArgList[i].V);
+    if (ArgList[i].Attrs.hasAttributes(i + 1)) {
+      AttrBuilder B(ArgList[i].Attrs, i + 1);
+      Attrs.push_back(AttributeSet::get(RetType->getContext(), i + 1, B));
+    }
+  }
+
+  if (I != E)
+    return Error(CallLoc, "not enough parameters specified for call");
+
+  if (FnAttrs.hasAttributes()) {
+    if (FnAttrs.hasAlignmentAttr())
+      return Error(CallLoc, "call instructions may not have an alignment");
+
+    Attrs.push_back(AttributeSet::get(RetType->getContext(),
+                                      AttributeSet::FunctionIndex,
+                                      FnAttrs));
+  }
+
+  // Finish off the Attributes and check them.
+  AttributeSet PAL = AttributeSet::get(Context, Attrs);
+
+  CallInst *CI = CallInst::Create(Ty, Callee, Args, BundleList);
+  CI->setTailCallKind(TCK);
+  CI->setCallingConv(CC);
+  if (FMF.any())
+    CI->setFastMathFlags(FMF);
+  CI->setAttributes(PAL);
+  ForwardRefAttrGroups[CI] = FwdRefAttrGrps;
+  Inst = CI;
+  return false;
+}
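+
+// For illustration only, forms accepted by ParseCall above, with placeholder
+// callee and argument names:
+//   %r  = call i32 @callee(i32 %x, float %y)
+//   %r2 = tail call fastcc i32 @callee(i32 %x, float %y)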
+
+//===----------------------------------------------------------------------===//
+// Memory Instructions.
+//===----------------------------------------------------------------------===//
+
+/// ParseAlloc
+///   ::= 'alloca' 'inalloca'? 'swifterror'? Type (',' TypeAndValue)?
+///       (',' 'align' i32)?
+int LLParser::ParseAlloc(Instruction *&Inst, PerFunctionState &PFS) {
+  Value *Size = nullptr;
+  LocTy SizeLoc, TyLoc;
+  unsigned Alignment = 0;
+  Type *Ty = nullptr;
+
+  bool IsInAlloca = EatIfPresent(lltok::kw_inalloca);
+  bool IsSwiftError = EatIfPresent(lltok::kw_swifterror);
+
+  if (ParseType(Ty, TyLoc)) return true;
+
+  if (Ty->isFunctionTy() || !PointerType::isValidElementType(Ty))
+    return Error(TyLoc, "invalid type for alloca");
+
+  bool AteExtraComma = false;
+  if (EatIfPresent(lltok::comma)) {
+    if (Lex.getKind() == lltok::kw_align) {
+      if (ParseOptionalAlignment(Alignment)) return true;
+    } else if (Lex.getKind() == lltok::MetadataVar) {
+      AteExtraComma = true;
+    } else {
+      if (ParseTypeAndValue(Size, SizeLoc, PFS) ||
+          ParseOptionalCommaAlign(Alignment, AteExtraComma))
+        return true;
+    }
+  }
+
+  if (Size && !Size->getType()->isIntegerTy())
+    return Error(SizeLoc, "element count must have integer type");
+
+  AllocaInst *AI = new AllocaInst(Ty, Size, Alignment);
+  AI->setUsedWithInAlloca(IsInAlloca);
+  AI->setSwiftError(IsSwiftError);
+  Inst = AI;
+  return AteExtraComma ? InstExtraComma : InstNormal;
+}
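+
+// For illustration only, forms accepted by ParseAlloc above (placeholder names):
+//   %buf = alloca [16 x i8], align 16
+//   %arr = alloca i32, i32 %n, align 4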
+
+/// ParseLoad
+///   ::= 'load' 'volatile'? TypeAndValue (',' 'align' i32)?
+///   ::= 'load' 'atomic' 'volatile'? TypeAndValue
+///       'singlethread'? AtomicOrdering (',' 'align' i32)?
+int LLParser::ParseLoad(Instruction *&Inst, PerFunctionState &PFS) {
+  Value *Val; LocTy Loc;
+  unsigned Alignment = 0;
+  bool AteExtraComma = false;
+  bool isAtomic = false;
+  AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
+  SynchronizationScope Scope = CrossThread;
+
+  if (Lex.getKind() == lltok::kw_atomic) {
+    isAtomic = true;
+    Lex.Lex();
+  }
+
+  bool isVolatile = false;
+  if (Lex.getKind() == lltok::kw_volatile) {
+    isVolatile = true;
+    Lex.Lex();
+  }
+
+  Type *Ty;
+  LocTy ExplicitTypeLoc = Lex.getLoc();
+  if (ParseType(Ty) ||
+      ParseToken(lltok::comma, "expected comma after load's type") ||
+      ParseTypeAndValue(Val, Loc, PFS) ||
+      ParseScopeAndOrdering(isAtomic, Scope, Ordering) ||
+      ParseOptionalCommaAlign(Alignment, AteExtraComma))
+    return true;
+
+  if (!Val->getType()->isPointerTy() || !Ty->isFirstClassType())
+    return Error(Loc, "load operand must be a pointer to a first class type");
+  if (isAtomic && !Alignment)
+    return Error(Loc, "atomic load must have explicit non-zero alignment");
+  if (Ordering == AtomicOrdering::Release ||
+      Ordering == AtomicOrdering::AcquireRelease)
+    return Error(Loc, "atomic load cannot use Release ordering");
+
+  if (Ty != cast<PointerType>(Val->getType())->getElementType())
+    return Error(ExplicitTypeLoc,
+                 "explicit pointee type doesn't match operand's pointee type");
+
+  Inst = new LoadInst(Ty, Val, "", isVolatile, Alignment, Ordering, Scope);
+  return AteExtraComma ? InstExtraComma : InstNormal;
+}
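+
+// For illustration only, forms accepted by ParseLoad above (placeholder names):
+//   %v  = load i32, i32* %p, align 4
+//   %v2 = load atomic i32, i32* %p seq_cst, align 4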
+
+/// ParseStore
+///   ::= 'store' 'volatile'? TypeAndValue ',' TypeAndValue (',' 'align' i32)?
+///   ::= 'store' 'atomic' 'volatile'? TypeAndValue ',' TypeAndValue
+///       'singlethread'? AtomicOrdering (',' 'align' i32)?
+int LLParser::ParseStore(Instruction *&Inst, PerFunctionState &PFS) {
+  Value *Val, *Ptr; LocTy Loc, PtrLoc;
+  unsigned Alignment = 0;
+  bool AteExtraComma = false;
+  bool isAtomic = false;
+  AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
+  SynchronizationScope Scope = CrossThread;
+
+  if (Lex.getKind() == lltok::kw_atomic) {
+    isAtomic = true;
+    Lex.Lex();
+  }
+
+  bool isVolatile = false;
+  if (Lex.getKind() == lltok::kw_volatile) {
+    isVolatile = true;
+    Lex.Lex();
+  }
+
+  if (ParseTypeAndValue(Val, Loc, PFS) ||
+      ParseToken(lltok::comma, "expected ',' after store operand") ||
+      ParseTypeAndValue(Ptr, PtrLoc, PFS) ||
+      ParseScopeAndOrdering(isAtomic, Scope, Ordering) ||
+      ParseOptionalCommaAlign(Alignment, AteExtraComma))
+    return true;
+
+  if (!Ptr->getType()->isPointerTy())
+    return Error(PtrLoc, "store operand must be a pointer");
+  if (!Val->getType()->isFirstClassType())
+    return Error(Loc, "store operand must be a first class value");
+  if (cast<PointerType>(Ptr->getType())->getElementType() != Val->getType())
+    return Error(Loc, "stored value and pointer type do not match");
+  if (isAtomic && !Alignment)
+    return Error(Loc, "atomic store must have explicit non-zero alignment");
+  if (Ordering == AtomicOrdering::Acquire ||
+      Ordering == AtomicOrdering::AcquireRelease)
+    return Error(Loc, "atomic store cannot use Acquire ordering");
+
+  Inst = new StoreInst(Val, Ptr, isVolatile, Alignment, Ordering, Scope);
+  return AteExtraComma ? InstExtraComma : InstNormal;
+}
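+
+// For illustration only, forms accepted by ParseStore above (placeholder names):
+//   store i32 %v, i32* %p, align 4
+//   store atomic i32 %v, i32* %p release, align 4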
+
+/// ParseCmpXchg
+///   ::= 'cmpxchg' 'weak'? 'volatile'? TypeAndValue ',' TypeAndValue ','
+///       TypeAndValue 'singlethread'? AtomicOrdering AtomicOrdering
+int LLParser::ParseCmpXchg(Instruction *&Inst, PerFunctionState &PFS) {
+  Value *Ptr, *Cmp, *New; LocTy PtrLoc, CmpLoc, NewLoc;
+  bool AteExtraComma = false;
+  AtomicOrdering SuccessOrdering = AtomicOrdering::NotAtomic;
+  AtomicOrdering FailureOrdering = AtomicOrdering::NotAtomic;
+  SynchronizationScope Scope = CrossThread;
+  bool isVolatile = false;
+  bool isWeak = false;
+
+  if (EatIfPresent(lltok::kw_weak))
+    isWeak = true;
+
+  if (EatIfPresent(lltok::kw_volatile))
+    isVolatile = true;
+
+  if (ParseTypeAndValue(Ptr, PtrLoc, PFS) ||
+      ParseToken(lltok::comma, "expected ',' after cmpxchg address") ||
+      ParseTypeAndValue(Cmp, CmpLoc, PFS) ||
+      ParseToken(lltok::comma, "expected ',' after cmpxchg cmp operand") ||
+      ParseTypeAndValue(New, NewLoc, PFS) ||
+      ParseScopeAndOrdering(true /*Always atomic*/, Scope, SuccessOrdering) ||
+      ParseOrdering(FailureOrdering))
+    return true;
+
+  if (SuccessOrdering == AtomicOrdering::Unordered ||
+      FailureOrdering == AtomicOrdering::Unordered)
+    return TokError("cmpxchg cannot be unordered");
+  if (isStrongerThan(FailureOrdering, SuccessOrdering))
+    return TokError("cmpxchg failure argument shall be no stronger than the "
+                    "success argument");
+  if (FailureOrdering == AtomicOrdering::Release ||
+      FailureOrdering == AtomicOrdering::AcquireRelease)
+    return TokError(
+        "cmpxchg failure ordering cannot include release semantics");
+  if (!Ptr->getType()->isPointerTy())
+    return Error(PtrLoc, "cmpxchg operand must be a pointer");
+  if (cast<PointerType>(Ptr->getType())->getElementType() != Cmp->getType())
+    return Error(CmpLoc, "compare value and pointer type do not match");
+  if (cast<PointerType>(Ptr->getType())->getElementType() != New->getType())
+    return Error(NewLoc, "new value and pointer type do not match");
+  if (!New->getType()->isFirstClassType())
+    return Error(NewLoc, "cmpxchg operand must be a first class value");
+  AtomicCmpXchgInst *CXI = new AtomicCmpXchgInst(
+      Ptr, Cmp, New, SuccessOrdering, FailureOrdering, Scope);
+  CXI->setVolatile(isVolatile);
+  CXI->setWeak(isWeak);
+  Inst = CXI;
+  return AteExtraComma ? InstExtraComma : InstNormal;
+}
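+
+// For illustration only, a form accepted by ParseCmpXchg above (placeholder
+// names); the result is the pair { i32, i1 }:
+//   %pair = cmpxchg i32* %ptr, i32 %old, i32 %new seq_cst monotonic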
+
+/// ParseAtomicRMW
+///   ::= 'atomicrmw' 'volatile'? BinOp TypeAndValue ',' TypeAndValue
+///       'singlethread'? AtomicOrdering
+int LLParser::ParseAtomicRMW(Instruction *&Inst, PerFunctionState &PFS) {
+  Value *Ptr, *Val; LocTy PtrLoc, ValLoc;
+  bool AteExtraComma = false;
+  AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
+  SynchronizationScope Scope = CrossThread;
+  bool isVolatile = false;
+  AtomicRMWInst::BinOp Operation;
+
+  if (EatIfPresent(lltok::kw_volatile))
+    isVolatile = true;
+
+  switch (Lex.getKind()) {
+  default: return TokError("expected binary operation in atomicrmw");
+  case lltok::kw_xchg: Operation = AtomicRMWInst::Xchg; break;
+  case lltok::kw_add: Operation = AtomicRMWInst::Add; break;
+  case lltok::kw_sub: Operation = AtomicRMWInst::Sub; break;
+  case lltok::kw_and: Operation = AtomicRMWInst::And; break;
+  case lltok::kw_nand: Operation = AtomicRMWInst::Nand; break;
+  case lltok::kw_or: Operation = AtomicRMWInst::Or; break;
+  case lltok::kw_xor: Operation = AtomicRMWInst::Xor; break;
+  case lltok::kw_max: Operation = AtomicRMWInst::Max; break;
+  case lltok::kw_min: Operation = AtomicRMWInst::Min; break;
+  case lltok::kw_umax: Operation = AtomicRMWInst::UMax; break;
+  case lltok::kw_umin: Operation = AtomicRMWInst::UMin; break;
+  }
+  Lex.Lex();  // Eat the operation.
+
+  if (ParseTypeAndValue(Ptr, PtrLoc, PFS) ||
+      ParseToken(lltok::comma, "expected ',' after atomicrmw address") ||
+      ParseTypeAndValue(Val, ValLoc, PFS) ||
+      ParseScopeAndOrdering(true /*Always atomic*/, Scope, Ordering))
+    return true;
+
+  if (Ordering == AtomicOrdering::Unordered)
+    return TokError("atomicrmw cannot be unordered");
+  if (!Ptr->getType()->isPointerTy())
+    return Error(PtrLoc, "atomicrmw operand must be a pointer");
+  if (cast<PointerType>(Ptr->getType())->getElementType() != Val->getType())
+    return Error(ValLoc, "atomicrmw value and pointer type do not match");
+  if (!Val->getType()->isIntegerTy())
+    return Error(ValLoc, "atomicrmw operand must be an integer");
+  unsigned Size = Val->getType()->getPrimitiveSizeInBits();
+  if (Size < 8 || (Size & (Size - 1)))
+    return Error(ValLoc, "atomicrmw operand must be power-of-two byte-sized"
+                         " integer");
+
+  AtomicRMWInst *RMWI =
+    new AtomicRMWInst(Operation, Ptr, Val, Ordering, Scope);
+  RMWI->setVolatile(isVolatile);
+  Inst = RMWI;
+  return AteExtraComma ? InstExtraComma : InstNormal;
+}
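+
+// For illustration only, a form accepted by ParseAtomicRMW above (placeholder
+// names):
+//   %old = atomicrmw add i32* %ptr, i32 1 seq_cst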
+
+/// ParseFence
+///   ::= 'fence' 'singlethread'? AtomicOrdering
+int LLParser::ParseFence(Instruction *&Inst, PerFunctionState &PFS) {
+  AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
+  SynchronizationScope Scope = CrossThread;
+  if (ParseScopeAndOrdering(true /*Always atomic*/, Scope, Ordering))
+    return true;
+
+  if (Ordering == AtomicOrdering::Unordered)
+    return TokError("fence cannot be unordered");
+  if (Ordering == AtomicOrdering::Monotonic)
+    return TokError("fence cannot be monotonic");
+
+  Inst = new FenceInst(Context, Ordering, Scope);
+  return InstNormal;
+}
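+
+// For illustration only, forms accepted by ParseFence above:
+//   fence seq_cst
+//   fence singlethread acquire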
+
+/// ParseGetElementPtr
+///   ::= 'getelementptr' 'inbounds'? TypeAndValue (',' TypeAndValue)*
+int LLParser::ParseGetElementPtr(Instruction *&Inst, PerFunctionState &PFS) {
+  Value *Ptr = nullptr;
+  Value *Val = nullptr;
+  LocTy Loc, EltLoc;
+
+  bool InBounds = EatIfPresent(lltok::kw_inbounds);
+
+  Type *Ty = nullptr;
+  LocTy ExplicitTypeLoc = Lex.getLoc();
+  if (ParseType(Ty) ||
+      ParseToken(lltok::comma, "expected comma after getelementptr's type") ||
+      ParseTypeAndValue(Ptr, Loc, PFS))
+    return true;
+
+  Type *BaseType = Ptr->getType();
+  PointerType *BasePointerType = dyn_cast<PointerType>(BaseType->getScalarType());
+  if (!BasePointerType)
+    return Error(Loc, "base of getelementptr must be a pointer");
+
+  if (Ty != BasePointerType->getElementType())
+    return Error(ExplicitTypeLoc,
+                 "explicit pointee type doesn't match operand's pointee type");
+
+  SmallVector<Value*, 16> Indices;
+  bool AteExtraComma = false;
+  // GEP returns a vector of pointers if at least one of the parameters is a
+  // vector. All vector parameters should have the same vector width.
+  unsigned GEPWidth = BaseType->isVectorTy() ?
+    BaseType->getVectorNumElements() : 0;
+
+  while (EatIfPresent(lltok::comma)) {
+    if (Lex.getKind() == lltok::MetadataVar) {
+      AteExtraComma = true;
+      break;
+    }
+    if (ParseTypeAndValue(Val, EltLoc, PFS)) return true;
+    if (!Val->getType()->getScalarType()->isIntegerTy())
+      return Error(EltLoc, "getelementptr index must be an integer");
+
+    if (Val->getType()->isVectorTy()) {
+      unsigned ValNumEl = Val->getType()->getVectorNumElements();
+      if (GEPWidth && GEPWidth != ValNumEl)
+        return Error(EltLoc,
+          "getelementptr vector index has a wrong number of elements");
+      GEPWidth = ValNumEl;
+    }
+    Indices.push_back(Val);
+  }
+
+  SmallPtrSet<Type*, 4> Visited;
+  if (!Indices.empty() && !Ty->isSized(&Visited))
+    return Error(Loc, "base element of getelementptr must be sized");
+
+  if (!GetElementPtrInst::getIndexedType(Ty, Indices))
+    return Error(Loc, "invalid getelementptr indices");
+  Inst = GetElementPtrInst::Create(Ty, Ptr, Indices);
+  if (InBounds)
+    cast<GetElementPtrInst>(Inst)->setIsInBounds(true);
+  return AteExtraComma ? InstExtraComma : InstNormal;
+}
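+
+// For illustration only, a form accepted by ParseGetElementPtr above
+// (placeholder names):
+//   %elt = getelementptr inbounds [8 x i32], [8 x i32]* %a, i64 0, i64 %i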
+
+/// ParseExtractValue
+///   ::= 'extractvalue' TypeAndValue (',' uint32)+
+int LLParser::ParseExtractValue(Instruction *&Inst, PerFunctionState &PFS) {
+  Value *Val; LocTy Loc;
+  SmallVector<unsigned, 4> Indices;
+  bool AteExtraComma;
+  if (ParseTypeAndValue(Val, Loc, PFS) ||
+      ParseIndexList(Indices, AteExtraComma))
+    return true;
+
+  if (!Val->getType()->isAggregateType())
+    return Error(Loc, "extractvalue operand must be aggregate type");
+
+  if (!ExtractValueInst::getIndexedType(Val->getType(), Indices))
+    return Error(Loc, "invalid indices for extractvalue");
+  Inst = ExtractValueInst::Create(Val, Indices);
+  return AteExtraComma ? InstExtraComma : InstNormal;
+}
+
+/// ParseInsertValue
+///   ::= 'insertvalue' TypeAndValue ',' TypeAndValue (',' uint32)+
+int LLParser::ParseInsertValue(Instruction *&Inst, PerFunctionState &PFS) {
+  Value *Val0, *Val1; LocTy Loc0, Loc1;
+  SmallVector<unsigned, 4> Indices;
+  bool AteExtraComma;
+  if (ParseTypeAndValue(Val0, Loc0, PFS) ||
+      ParseToken(lltok::comma, "expected comma after insertvalue operand") ||
+      ParseTypeAndValue(Val1, Loc1, PFS) ||
+      ParseIndexList(Indices, AteExtraComma))
+    return true;
+
+  if (!Val0->getType()->isAggregateType())
+    return Error(Loc0, "insertvalue operand must be aggregate type");
+
+  Type *IndexedType = ExtractValueInst::getIndexedType(Val0->getType(), Indices);
+  if (!IndexedType)
+    return Error(Loc0, "invalid indices for insertvalue");
+  if (IndexedType != Val1->getType())
+    return Error(Loc1, "insertvalue operand and field disagree in type: '" +
+                           getTypeString(Val1->getType()) + "' instead of '" +
+                           getTypeString(IndexedType) + "'");
+  Inst = InsertValueInst::Create(Val0, Val1, Indices);
+  return AteExtraComma ? InstExtraComma : InstNormal;
+}
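+
+// For illustration only, forms accepted by ParseExtractValue and
+// ParseInsertValue above (placeholder names):
+//   %x    = extractvalue { i32, float } %agg, 0
+//   %agg2 = insertvalue { i32, float } %agg, float 1.0, 1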
+
+//===----------------------------------------------------------------------===//
+// Embedded metadata.
+//===----------------------------------------------------------------------===//
+
+/// ParseMDNodeVector
+///   ::= { Element (',' Element)* }
+/// Element
+///   ::= 'null' | TypeAndValue
+bool LLParser::ParseMDNodeVector(SmallVectorImpl<Metadata *> &Elts) {
+  if (ParseToken(lltok::lbrace, "expected '{' here"))
+    return true;
+
+  // Check for an empty list.
+  if (EatIfPresent(lltok::rbrace))
+    return false;
+
+  do {
+    // Null is a special case since it is typeless.
+    if (EatIfPresent(lltok::kw_null)) {
+      Elts.push_back(nullptr);
+      continue;
+    }
+
+    Metadata *MD;
+    if (ParseMetadata(MD, nullptr))
+      return true;
+    Elts.push_back(MD);
+  } while (EatIfPresent(lltok::comma));
+
+  return ParseToken(lltok::rbrace, "expected end of metadata node");
+}
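+
+// For illustration only, a node whose braced element list ParseMDNodeVector
+// above handles (placeholder contents):
+//   !0 = !{!"key", i32 42, null}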
+
+//===----------------------------------------------------------------------===//
+// Use-list order directives.
+//===----------------------------------------------------------------------===//
+bool LLParser::sortUseListOrder(Value *V, ArrayRef<unsigned> Indexes,
+                                SMLoc Loc) {
+  if (V->use_empty())
+    return Error(Loc, "value has no uses");
+
+  unsigned NumUses = 0;
+  SmallDenseMap<const Use *, unsigned, 16> Order;
+  for (const Use &U : V->uses()) {
+    if (++NumUses > Indexes.size())
+      break;
+    Order[&U] = Indexes[NumUses - 1];
+  }
+  if (NumUses < 2)
+    return Error(Loc, "value only has one use");
+  if (Order.size() != Indexes.size() || NumUses > Indexes.size())
+    return Error(Loc, "wrong number of indexes, expected " +
+                          Twine(std::distance(V->use_begin(), V->use_end())));
+
+  V->sortUseList([&](const Use &L, const Use &R) {
+    return Order.lookup(&L) < Order.lookup(&R);
+  });
+  return false;
+}
+
+/// ParseUseListOrderIndexes
+///   ::= '{' uint32 (',' uint32)+ '}'
+bool LLParser::ParseUseListOrderIndexes(SmallVectorImpl<unsigned> &Indexes) {
+  SMLoc Loc = Lex.getLoc();
+  if (ParseToken(lltok::lbrace, "expected '{' here"))
+    return true;
+  if (Lex.getKind() == lltok::rbrace)
+    return Lex.Error("expected non-empty list of uselistorder indexes");
+
+  // Use Offset, Max, and IsOrdered to check consistency of indexes.  The
+  // indexes should be distinct numbers in the range [0, size-1], and should
+  // not be in order.
+  unsigned Offset = 0;
+  unsigned Max = 0;
+  bool IsOrdered = true;
+  assert(Indexes.empty() && "Expected empty order vector");
+  do {
+    unsigned Index;
+    if (ParseUInt32(Index))
+      return true;
+
+    // Update consistency checks.
+    Offset += Index - Indexes.size();
+    Max = std::max(Max, Index);
+    IsOrdered &= Index == Indexes.size();
+
+    Indexes.push_back(Index);
+  } while (EatIfPresent(lltok::comma));
+
+  if (ParseToken(lltok::rbrace, "expected '}' here"))
+    return true;
+
+  if (Indexes.size() < 2)
+    return Error(Loc, "expected >= 2 uselistorder indexes");
+  if (Offset != 0 || Max >= Indexes.size())
+    return Error(Loc, "expected distinct uselistorder indexes in range [0, size)");
+  if (IsOrdered)
+    return Error(Loc, "expected uselistorder indexes to change the order");
+
+  return false;
+}
+
+/// ParseUseListOrder
+///   ::= 'uselistorder' Type Value ',' UseListOrderIndexes
+bool LLParser::ParseUseListOrder(PerFunctionState *PFS) {
+  SMLoc Loc = Lex.getLoc();
+  if (ParseToken(lltok::kw_uselistorder, "expected uselistorder directive"))
+    return true;
+
+  Value *V;
+  SmallVector<unsigned, 16> Indexes;
+  if (ParseTypeAndValue(V, PFS) ||
+      ParseToken(lltok::comma, "expected comma in uselistorder directive") ||
+      ParseUseListOrderIndexes(Indexes))
+    return true;
+
+  return sortUseListOrder(V, Indexes, Loc);
+}
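+
+// For illustration only, a directive accepted by ParseUseListOrder above
+// (placeholder value; the indexes must permute that value's three uses):
+//   uselistorder i32 %x, { 1, 0, 2 }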
+
+/// ParseUseListOrderBB
+///   ::= 'uselistorder_bb' @foo ',' %bar ',' UseListOrderIndexes
+bool LLParser::ParseUseListOrderBB() {
+  assert(Lex.getKind() == lltok::kw_uselistorder_bb);
+  SMLoc Loc = Lex.getLoc();
+  Lex.Lex();
+
+  ValID Fn, Label;
+  SmallVector<unsigned, 16> Indexes;
+  if (ParseValID(Fn) ||
+      ParseToken(lltok::comma, "expected comma in uselistorder_bb directive") ||
+      ParseValID(Label) ||
+      ParseToken(lltok::comma, "expected comma in uselistorder_bb directive") ||
+      ParseUseListOrderIndexes(Indexes))
+    return true;
+
+  // Check the function.
+  GlobalValue *GV;
+  if (Fn.Kind == ValID::t_GlobalName)
+    GV = M->getNamedValue(Fn.StrVal);
+  else if (Fn.Kind == ValID::t_GlobalID)
+    GV = Fn.UIntVal < NumberedVals.size() ? NumberedVals[Fn.UIntVal] : nullptr;
+  else
+    return Error(Fn.Loc, "expected function name in uselistorder_bb");
+  if (!GV)
+    return Error(Fn.Loc, "invalid function forward reference in uselistorder_bb");
+  auto *F = dyn_cast<Function>(GV);
+  if (!F)
+    return Error(Fn.Loc, "expected function name in uselistorder_bb");
+  if (F->isDeclaration())
+    return Error(Fn.Loc, "invalid declaration in uselistorder_bb");
+
+  // Check the basic block.
+  if (Label.Kind == ValID::t_LocalID)
+    return Error(Label.Loc, "invalid numeric label in uselistorder_bb");
+  if (Label.Kind != ValID::t_LocalName)
+    return Error(Label.Loc, "expected basic block name in uselistorder_bb");
+  Value *V = F->getValueSymbolTable()->lookup(Label.StrVal);
+  if (!V)
+    return Error(Label.Loc, "invalid basic block in uselistorder_bb");
+  if (!isa<BasicBlock>(V))
+    return Error(Label.Loc, "expected basic block in uselistorder_bb");
+
+  return sortUseListOrder(V, Indexes, Loc);
+}
diff --git a/llvm/tools/hpvm/llvm_patches/lib/AsmParser/LLParser.cpp.patch b/llvm/tools/hpvm/llvm_patches/lib/AsmParser/LLParser.cpp.patch
new file mode 100644
index 0000000000..632cb9488d
--- /dev/null
+++ b/llvm/tools/hpvm/llvm_patches/lib/AsmParser/LLParser.cpp.patch
@@ -0,0 +1,36 @@
+--- ../../../lib/AsmParser/LLParser.cpp	2019-12-29 18:23:35.463918719 -0600
++++ lib/AsmParser/LLParser.cpp	2019-12-29 18:44:13.295269292 -0600
+@@ -1138,6 +1138,11 @@
+     case lltok::kw_sret:
+     case lltok::kw_swifterror:
+     case lltok::kw_swiftself:
++    // VISC Parameter only attributes
++    case lltok::kw_in:
++    case lltok::kw_out:
++    case lltok::kw_inout:
++
+       HaveError |=
+         Error(Lex.getLoc(),
+               "invalid use of parameter-only attribute on a function");
+@@ -1413,6 +1418,10 @@
+     case lltok::kw_swiftself:       B.addAttribute(Attribute::SwiftSelf); break;
+     case lltok::kw_writeonly:       B.addAttribute(Attribute::WriteOnly); break;
+     case lltok::kw_zeroext:         B.addAttribute(Attribute::ZExt); break;
++    // VISC parameter attributes
++    case lltok::kw_in:              B.addAttribute(Attribute::In); break;
++    case lltok::kw_out:             B.addAttribute(Attribute::Out); break;
++    case lltok::kw_inout:           B.addAttribute(Attribute::InOut); break;
+ 
+     case lltok::kw_alignstack:
+     case lltok::kw_alwaysinline:
+@@ -1501,6 +1510,10 @@
+     case lltok::kw_sret:
+     case lltok::kw_swifterror:
+     case lltok::kw_swiftself:
++    // VISC Parameter only attributes
++    case lltok::kw_in:
++    case lltok::kw_out:
++    case lltok::kw_inout:
+       HaveError |= Error(Lex.getLoc(), "invalid use of parameter-only attribute");
+       break;
+ 
diff --git a/llvm/tools/hpvm/llvm_patches/lib/AsmParser/LLParser.h b/llvm/tools/hpvm/llvm_patches/lib/AsmParser/LLParser.h
new file mode 100644
index 0000000000..16d4e8b5ba
--- /dev/null
+++ b/llvm/tools/hpvm/llvm_patches/lib/AsmParser/LLParser.h
@@ -0,0 +1,510 @@
+//===-- LLParser.h - Parser Class -------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//  This file defines the parser class for .ll files.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_ASMPARSER_LLPARSER_H
+#define LLVM_LIB_ASMPARSER_LLPARSER_H
+
+#include "LLLexer.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/ValueHandle.h"
+#include <map>
+
+namespace llvm {
+  class Module;
+  class OpaqueType;
+  class Function;
+  class Value;
+  class BasicBlock;
+  class Instruction;
+  class Constant;
+  class GlobalValue;
+  class Comdat;
+  class MDString;
+  class MDNode;
+  struct SlotMapping;
+  class StructType;
+
+  /// ValID - Represents a reference of a definition of some sort with no type.
+  /// There are several cases where we have to parse the value but where the
+  /// type can depend on later context.  This may either be a numeric reference
+  /// or a symbolic (%var) reference.  This is just a discriminated union.
+  struct ValID {
+    enum {
+      t_LocalID, t_GlobalID,           // ID in UIntVal.
+      t_LocalName, t_GlobalName,       // Name in StrVal.
+      t_APSInt, t_APFloat,             // Value in APSIntVal/APFloatVal.
+      t_Null, t_Undef, t_Zero, t_None, // No value.
+      t_EmptyArray,                    // No value:  []
+      t_Constant,                      // Value in ConstantVal.
+      t_InlineAsm,                     // Value in FTy/StrVal/StrVal2/UIntVal.
+      t_ConstantStruct,                // Value in ConstantStructElts.
+      t_PackedConstantStruct           // Value in ConstantStructElts.
+    } Kind = t_LocalID;
+
+    LLLexer::LocTy Loc;
+    unsigned UIntVal;
+    FunctionType *FTy = nullptr;
+    std::string StrVal, StrVal2;
+    APSInt APSIntVal;
+    APFloat APFloatVal{0.0};
+    Constant *ConstantVal;
+    std::unique_ptr<Constant *[]> ConstantStructElts;
+
+    ValID() = default;
+    ValID(const ValID &RHS)
+        : Kind(RHS.Kind), Loc(RHS.Loc), UIntVal(RHS.UIntVal), FTy(RHS.FTy),
+          StrVal(RHS.StrVal), StrVal2(RHS.StrVal2), APSIntVal(RHS.APSIntVal),
+          APFloatVal(RHS.APFloatVal), ConstantVal(RHS.ConstantVal) {
+      assert(!RHS.ConstantStructElts);
+    }
+
+    bool operator<(const ValID &RHS) const {
+      if (Kind == t_LocalID || Kind == t_GlobalID)
+        return UIntVal < RHS.UIntVal;
+      assert((Kind == t_LocalName || Kind == t_GlobalName ||
+              Kind == t_ConstantStruct || Kind == t_PackedConstantStruct) &&
+             "Ordering not defined for this ValID kind yet");
+      return StrVal < RHS.StrVal;
+    }
+  };
+
+  class LLParser {
+  public:
+    typedef LLLexer::LocTy LocTy;
+  private:
+    LLVMContext &Context;
+    LLLexer Lex;
+    Module *M;
+    SlotMapping *Slots;
+
+    // Instruction metadata resolution.  Each instruction can have a list of
+    // MDRef info associated with them.
+    //
+    // The simpler approach of just creating temporary MDNodes and then calling
+    // RAUW on them when the definition is processed doesn't work because some
+    // instruction metadata kinds, such as dbg, get stored in the IR in an
+    // "optimized" format which doesn't participate in the normal value use
+    // lists. This means that RAUW doesn't work, even on temporary MDNodes
+    // which otherwise support RAUW. Instead, we defer resolving MDNode
+    // references until the definitions have been processed.
+    struct MDRef {
+      SMLoc Loc;
+      unsigned MDKind, MDSlot;
+    };
+
+    SmallVector<Instruction*, 64> InstsWithTBAATag;
+
+    // Type resolution handling data structures.  The location is set when we
+    // have processed a use of the type but not a definition yet.
+    StringMap<std::pair<Type*, LocTy> > NamedTypes;
+    std::map<unsigned, std::pair<Type*, LocTy> > NumberedTypes;
+
+    std::map<unsigned, TrackingMDNodeRef> NumberedMetadata;
+    std::map<unsigned, std::pair<TempMDTuple, LocTy>> ForwardRefMDNodes;
+
+    // Global Value reference information.
+    std::map<std::string, std::pair<GlobalValue*, LocTy> > ForwardRefVals;
+    std::map<unsigned, std::pair<GlobalValue*, LocTy> > ForwardRefValIDs;
+    std::vector<GlobalValue*> NumberedVals;
+
+    // Comdat forward reference information.
+    std::map<std::string, LocTy> ForwardRefComdats;
+
+    // References to blockaddress.  The key is the function ValID, the value is
+    // a list of references to blocks in that function.
+    std::map<ValID, std::map<ValID, GlobalValue *>> ForwardRefBlockAddresses;
+    class PerFunctionState;
+    /// Reference to per-function state to allow basic blocks to be
+    /// forward-referenced by blockaddress instructions within the same
+    /// function.
+    PerFunctionState *BlockAddressPFS;
+
+    // Attribute builder reference information.
+    std::map<Value*, std::vector<unsigned> > ForwardRefAttrGroups;
+    std::map<unsigned, AttrBuilder> NumberedAttrBuilders;
+
+  public:
+    LLParser(StringRef F, SourceMgr &SM, SMDiagnostic &Err, Module *M,
+             SlotMapping *Slots = nullptr)
+        : Context(M->getContext()), Lex(F, SM, Err, M->getContext()), M(M),
+          Slots(Slots), BlockAddressPFS(nullptr) {}
+    bool Run();
+
+    bool parseStandaloneConstantValue(Constant *&C, const SlotMapping *Slots);
+
+    bool parseTypeAtBeginning(Type *&Ty, unsigned &Read,
+                              const SlotMapping *Slots);
+
+    LLVMContext &getContext() { return Context; }
+
+  private:
+
+    bool Error(LocTy L, const Twine &Msg) const {
+      return Lex.Error(L, Msg);
+    }
+    bool TokError(const Twine &Msg) const {
+      return Error(Lex.getLoc(), Msg);
+    }
+
+    /// Restore the internal name and slot mappings using the mappings that
+    /// were created at an earlier parsing stage.
+    void restoreParsingState(const SlotMapping *Slots);
+
+    /// GetGlobalVal - Get a value with the specified name or ID, creating a
+    /// forward reference record if needed.  This can return null if the value
+    /// exists but does not have the right type.
+    GlobalValue *GetGlobalVal(const std::string &N, Type *Ty, LocTy Loc);
+    GlobalValue *GetGlobalVal(unsigned ID, Type *Ty, LocTy Loc);
+
+    /// Get a Comdat with the specified name, creating a forward reference
+    /// record if needed.
+    Comdat *getComdat(const std::string &N, LocTy Loc);
+
+    // Helper Routines.
+    bool ParseToken(lltok::Kind T, const char *ErrMsg);
+    bool EatIfPresent(lltok::Kind T) {
+      if (Lex.getKind() != T) return false;
+      Lex.Lex();
+      return true;
+    }
+
+    FastMathFlags EatFastMathFlagsIfPresent() {
+      FastMathFlags FMF;
+      while (true)
+        switch (Lex.getKind()) {
+        case lltok::kw_fast: FMF.setUnsafeAlgebra();   Lex.Lex(); continue;
+        case lltok::kw_nnan: FMF.setNoNaNs();          Lex.Lex(); continue;
+        case lltok::kw_ninf: FMF.setNoInfs();          Lex.Lex(); continue;
+        case lltok::kw_nsz:  FMF.setNoSignedZeros();   Lex.Lex(); continue;
+        case lltok::kw_arcp: FMF.setAllowReciprocal(); Lex.Lex(); continue;
+        default: return FMF;
+        }
+      return FMF;
+    }
+
+    bool ParseOptionalToken(lltok::Kind T, bool &Present,
+                            LocTy *Loc = nullptr) {
+      if (Lex.getKind() != T) {
+        Present = false;
+      } else {
+        if (Loc)
+          *Loc = Lex.getLoc();
+        Lex.Lex();
+        Present = true;
+      }
+      return false;
+    }
+    bool ParseStringConstant(std::string &Result);
+    bool ParseUInt32(unsigned &Val);
+    bool ParseUInt32(unsigned &Val, LocTy &Loc) {
+      Loc = Lex.getLoc();
+      return ParseUInt32(Val);
+    }
+    bool ParseUInt64(uint64_t &Val);
+    bool ParseUInt64(uint64_t &Val, LocTy &Loc) {
+      Loc = Lex.getLoc();
+      return ParseUInt64(Val);
+    }
+
+    bool ParseStringAttribute(AttrBuilder &B);
+
+    bool ParseTLSModel(GlobalVariable::ThreadLocalMode &TLM);
+    bool ParseOptionalThreadLocal(GlobalVariable::ThreadLocalMode &TLM);
+    bool ParseOptionalUnnamedAddr(GlobalVariable::UnnamedAddr &UnnamedAddr);
+    bool ParseOptionalAddrSpace(unsigned &AddrSpace);
+    bool ParseOptionalParamAttrs(AttrBuilder &B);
+    bool ParseOptionalReturnAttrs(AttrBuilder &B);
+    bool ParseOptionalLinkage(unsigned &Linkage, bool &HasLinkage,
+                              unsigned &Visibility, unsigned &DLLStorageClass);
+    void ParseOptionalVisibility(unsigned &Visibility);
+    void ParseOptionalDLLStorageClass(unsigned &DLLStorageClass);
+    bool ParseOptionalCallingConv(unsigned &CC);
+    bool ParseOptionalAlignment(unsigned &Alignment);
+    bool ParseOptionalDerefAttrBytes(lltok::Kind AttrKind, uint64_t &Bytes);
+    bool ParseScopeAndOrdering(bool isAtomic, SynchronizationScope &Scope,
+                               AtomicOrdering &Ordering);
+    bool ParseOrdering(AtomicOrdering &Ordering);
+    bool ParseOptionalStackAlignment(unsigned &Alignment);
+    bool ParseOptionalCommaAlign(unsigned &Alignment, bool &AteExtraComma);
+    bool ParseOptionalCommaInAlloca(bool &IsInAlloca);
+    bool parseAllocSizeArguments(unsigned &ElemSizeArg,
+                                 Optional<unsigned> &HowManyArg);
+    bool ParseIndexList(SmallVectorImpl<unsigned> &Indices,
+                        bool &AteExtraComma);
+    bool ParseIndexList(SmallVectorImpl<unsigned> &Indices) {
+      bool AteExtraComma;
+      if (ParseIndexList(Indices, AteExtraComma)) return true;
+      if (AteExtraComma)
+        return TokError("expected index");
+      return false;
+    }
+
+    // Top-Level Entities
+    bool ParseTopLevelEntities();
+    bool ValidateEndOfModule();
+    bool ParseTargetDefinition();
+    bool ParseModuleAsm();
+    bool ParseSourceFileName();
+    bool ParseDepLibs();        // FIXME: Remove in 4.0.
+    bool ParseUnnamedType();
+    bool ParseNamedType();
+    bool ParseDeclare();
+    bool ParseDefine();
+
+    bool ParseGlobalType(bool &IsConstant);
+    bool ParseUnnamedGlobal();
+    bool ParseNamedGlobal();
+    bool ParseGlobal(const std::string &Name, LocTy Loc, unsigned Linkage,
+                     bool HasLinkage, unsigned Visibility,
+                     unsigned DLLStorageClass,
+                     GlobalVariable::ThreadLocalMode TLM,
+                     GlobalVariable::UnnamedAddr UnnamedAddr);
+    bool parseIndirectSymbol(const std::string &Name, LocTy Loc,
+                             unsigned Linkage, unsigned Visibility,
+                             unsigned DLLStorageClass,
+                             GlobalVariable::ThreadLocalMode TLM,
+                             GlobalVariable::UnnamedAddr UnnamedAddr);
+    bool parseComdat();
+    bool ParseStandaloneMetadata();
+    bool ParseNamedMetadata();
+    bool ParseMDString(MDString *&Result);
+    bool ParseMDNodeID(MDNode *&Result);
+    bool ParseUnnamedAttrGrp();
+    bool ParseFnAttributeValuePairs(AttrBuilder &B,
+                                    std::vector<unsigned> &FwdRefAttrGrps,
+                                    bool inAttrGrp, LocTy &BuiltinLoc);
+
+    // Type Parsing.
+    bool ParseType(Type *&Result, const Twine &Msg, bool AllowVoid = false);
+    bool ParseType(Type *&Result, bool AllowVoid = false) {
+      return ParseType(Result, "expected type", AllowVoid);
+    }
+    bool ParseType(Type *&Result, const Twine &Msg, LocTy &Loc,
+                   bool AllowVoid = false) {
+      Loc = Lex.getLoc();
+      return ParseType(Result, Msg, AllowVoid);
+    }
+    bool ParseType(Type *&Result, LocTy &Loc, bool AllowVoid = false) {
+      Loc = Lex.getLoc();
+      return ParseType(Result, AllowVoid);
+    }
+    bool ParseAnonStructType(Type *&Result, bool Packed);
+    bool ParseStructBody(SmallVectorImpl<Type*> &Body);
+    bool ParseStructDefinition(SMLoc TypeLoc, StringRef Name,
+                               std::pair<Type*, LocTy> &Entry,
+                               Type *&ResultTy);
+
+    bool ParseArrayVectorType(Type *&Result, bool isVector);
+    bool ParseFunctionType(Type *&Result);
+
+    // Function Semantic Analysis.
+    class PerFunctionState {
+      LLParser &P;
+      Function &F;
+      std::map<std::string, std::pair<Value*, LocTy> > ForwardRefVals;
+      std::map<unsigned, std::pair<Value*, LocTy> > ForwardRefValIDs;
+      std::vector<Value*> NumberedVals;
+
+      /// FunctionNumber - If this is an unnamed function, this is the slot
+      /// number of it, otherwise it is -1.
+      int FunctionNumber;
+    public:
+      PerFunctionState(LLParser &p, Function &f, int FunctionNumber);
+      ~PerFunctionState();
+
+      Function &getFunction() const { return F; }
+
+      bool FinishFunction();
+
+      /// GetVal - Get a value with the specified name or ID, creating a
+      /// forward reference record if needed.  This can return null if the value
+      /// exists but does not have the right type.
+      Value *GetVal(const std::string &Name, Type *Ty, LocTy Loc);
+      Value *GetVal(unsigned ID, Type *Ty, LocTy Loc);
+
+      /// SetInstName - After an instruction is parsed and inserted into its
+      /// basic block, this installs its name.
+      bool SetInstName(int NameID, const std::string &NameStr, LocTy NameLoc,
+                       Instruction *Inst);
+
+      /// GetBB - Get a basic block with the specified name or ID, creating a
+      /// forward reference record if needed.  This can return null if the value
+      /// is not a BasicBlock.
+      BasicBlock *GetBB(const std::string &Name, LocTy Loc);
+      BasicBlock *GetBB(unsigned ID, LocTy Loc);
+
+      /// DefineBB - Define the specified basic block, which is either named or
+      /// unnamed.  If there is an error, this returns null otherwise it returns
+      /// the block being defined.
+      BasicBlock *DefineBB(const std::string &Name, LocTy Loc);
+
+      bool resolveForwardRefBlockAddresses();
+    };
+
+    bool ConvertValIDToValue(Type *Ty, ValID &ID, Value *&V,
+                             PerFunctionState *PFS);
+
+    bool parseConstantValue(Type *Ty, Constant *&C);
+    bool ParseValue(Type *Ty, Value *&V, PerFunctionState *PFS);
+    bool ParseValue(Type *Ty, Value *&V, PerFunctionState &PFS) {
+      return ParseValue(Ty, V, &PFS);
+    }
+
+    bool ParseValue(Type *Ty, Value *&V, LocTy &Loc,
+                    PerFunctionState &PFS) {
+      Loc = Lex.getLoc();
+      return ParseValue(Ty, V, &PFS);
+    }
+
+    bool ParseTypeAndValue(Value *&V, PerFunctionState *PFS);
+    bool ParseTypeAndValue(Value *&V, PerFunctionState &PFS) {
+      return ParseTypeAndValue(V, &PFS);
+    }
+    bool ParseTypeAndValue(Value *&V, LocTy &Loc, PerFunctionState &PFS) {
+      Loc = Lex.getLoc();
+      return ParseTypeAndValue(V, PFS);
+    }
+    bool ParseTypeAndBasicBlock(BasicBlock *&BB, LocTy &Loc,
+                                PerFunctionState &PFS);
+    bool ParseTypeAndBasicBlock(BasicBlock *&BB, PerFunctionState &PFS) {
+      LocTy Loc;
+      return ParseTypeAndBasicBlock(BB, Loc, PFS);
+    }
+
+    struct ParamInfo {
+      LocTy Loc;
+      Value *V;
+      AttributeSet Attrs;
+      ParamInfo(LocTy loc, Value *v, AttributeSet attrs)
+        : Loc(loc), V(v), Attrs(attrs) {}
+    };
+    bool ParseParameterList(SmallVectorImpl<ParamInfo> &ArgList,
+                            PerFunctionState &PFS,
+                            bool IsMustTailCall = false,
+                            bool InVarArgsFunc = false);
+
+    bool
+    ParseOptionalOperandBundles(SmallVectorImpl<OperandBundleDef> &BundleList,
+                                PerFunctionState &PFS);
+
+    bool ParseExceptionArgs(SmallVectorImpl<Value *> &Args,
+                            PerFunctionState &PFS);
+
+    // Constant Parsing.
+    bool ParseValID(ValID &ID, PerFunctionState *PFS = nullptr);
+    bool ParseGlobalValue(Type *Ty, Constant *&V);
+    bool ParseGlobalTypeAndValue(Constant *&V);
+    bool ParseGlobalValueVector(SmallVectorImpl<Constant *> &Elts,
+                                Optional<unsigned> *InRangeOp = nullptr);
+    bool parseOptionalComdat(StringRef GlobalName, Comdat *&C);
+    bool ParseMetadataAsValue(Value *&V, PerFunctionState &PFS);
+    bool ParseValueAsMetadata(Metadata *&MD, const Twine &TypeMsg,
+                              PerFunctionState *PFS);
+    bool ParseMetadata(Metadata *&MD, PerFunctionState *PFS);
+    bool ParseMDTuple(MDNode *&MD, bool IsDistinct = false);
+    bool ParseMDNode(MDNode *&MD);
+    bool ParseMDNodeTail(MDNode *&MD);
+    bool ParseMDNodeVector(SmallVectorImpl<Metadata *> &MDs);
+    bool ParseMetadataAttachment(unsigned &Kind, MDNode *&MD);
+    bool ParseInstructionMetadata(Instruction &Inst);
+    bool ParseGlobalObjectMetadataAttachment(GlobalObject &GO);
+    bool ParseOptionalFunctionMetadata(Function &F);
+
+    template <class FieldTy>
+    bool ParseMDField(LocTy Loc, StringRef Name, FieldTy &Result);
+    template <class FieldTy> bool ParseMDField(StringRef Name, FieldTy &Result);
+    template <class ParserTy>
+    bool ParseMDFieldsImplBody(ParserTy parseField);
+    template <class ParserTy>
+    bool ParseMDFieldsImpl(ParserTy parseField, LocTy &ClosingLoc);
+    bool ParseSpecializedMDNode(MDNode *&N, bool IsDistinct = false);
+
+#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS)                                  \
+  bool Parse##CLASS(MDNode *&Result, bool IsDistinct);
+#include "llvm/IR/Metadata.def"
+
+    // Function Parsing.
+    struct ArgInfo {
+      LocTy Loc;
+      Type *Ty;
+      AttributeSet Attrs;
+      std::string Name;
+      ArgInfo(LocTy L, Type *ty, AttributeSet Attr, const std::string &N)
+        : Loc(L), Ty(ty), Attrs(Attr), Name(N) {}
+    };
+    bool ParseArgumentList(SmallVectorImpl<ArgInfo> &ArgList, bool &isVarArg);
+    bool ParseFunctionHeader(Function *&Fn, bool isDefine);
+    bool ParseFunctionBody(Function &Fn);
+    bool ParseBasicBlock(PerFunctionState &PFS);
+
+    enum TailCallType { TCT_None, TCT_Tail, TCT_MustTail };
+
+    // Instruction Parsing.  Each instruction parsing routine can return with a
+    // normal result, an error result, or return having eaten an extra comma.
+    enum InstResult { InstNormal = 0, InstError = 1, InstExtraComma = 2 };
+    int ParseInstruction(Instruction *&Inst, BasicBlock *BB,
+                         PerFunctionState &PFS);
+    bool ParseCmpPredicate(unsigned &Pred, unsigned Opc);
+
+    bool ParseRet(Instruction *&Inst, BasicBlock *BB, PerFunctionState &PFS);
+    bool ParseBr(Instruction *&Inst, PerFunctionState &PFS);
+    bool ParseSwitch(Instruction *&Inst, PerFunctionState &PFS);
+    bool ParseIndirectBr(Instruction *&Inst, PerFunctionState &PFS);
+    bool ParseInvoke(Instruction *&Inst, PerFunctionState &PFS);
+    bool ParseResume(Instruction *&Inst, PerFunctionState &PFS);
+    bool ParseCleanupRet(Instruction *&Inst, PerFunctionState &PFS);
+    bool ParseCatchRet(Instruction *&Inst, PerFunctionState &PFS);
+    bool ParseCatchSwitch(Instruction *&Inst, PerFunctionState &PFS);
+    bool ParseCatchPad(Instruction *&Inst, PerFunctionState &PFS);
+    bool ParseCleanupPad(Instruction *&Inst, PerFunctionState &PFS);
+
+    bool ParseArithmetic(Instruction *&I, PerFunctionState &PFS, unsigned Opc,
+                         unsigned OperandType);
+    bool ParseLogical(Instruction *&I, PerFunctionState &PFS, unsigned Opc);
+    bool ParseCompare(Instruction *&I, PerFunctionState &PFS, unsigned Opc);
+    bool ParseCast(Instruction *&I, PerFunctionState &PFS, unsigned Opc);
+    bool ParseSelect(Instruction *&I, PerFunctionState &PFS);
+    bool ParseVA_Arg(Instruction *&I, PerFunctionState &PFS);
+    bool ParseExtractElement(Instruction *&I, PerFunctionState &PFS);
+    bool ParseInsertElement(Instruction *&I, PerFunctionState &PFS);
+    bool ParseShuffleVector(Instruction *&I, PerFunctionState &PFS);
+    int ParsePHI(Instruction *&I, PerFunctionState &PFS);
+    bool ParseLandingPad(Instruction *&I, PerFunctionState &PFS);
+    bool ParseCall(Instruction *&I, PerFunctionState &PFS,
+                   CallInst::TailCallKind IsTail);
+    int ParseAlloc(Instruction *&I, PerFunctionState &PFS);
+    int ParseLoad(Instruction *&I, PerFunctionState &PFS);
+    int ParseStore(Instruction *&I, PerFunctionState &PFS);
+    int ParseCmpXchg(Instruction *&I, PerFunctionState &PFS);
+    int ParseAtomicRMW(Instruction *&I, PerFunctionState &PFS);
+    int ParseFence(Instruction *&I, PerFunctionState &PFS);
+    int ParseGetElementPtr(Instruction *&I, PerFunctionState &PFS);
+    int ParseExtractValue(Instruction *&I, PerFunctionState &PFS);
+    int ParseInsertValue(Instruction *&I, PerFunctionState &PFS);
+
+    // Use-list order directives.
+    bool ParseUseListOrder(PerFunctionState *PFS = nullptr);
+    bool ParseUseListOrderBB();
+    bool ParseUseListOrderIndexes(SmallVectorImpl<unsigned> &Indexes);
+    bool sortUseListOrder(Value *V, ArrayRef<unsigned> Indexes, SMLoc Loc);
+  };
+} // End llvm namespace
+
+#endif
diff --git a/llvm/tools/hpvm/llvm_patches/lib/AsmParser/LLParser.h.patch b/llvm/tools/hpvm/llvm_patches/lib/AsmParser/LLParser.h.patch
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/llvm/tools/hpvm/llvm_patches/lib/AsmParser/LLToken.h b/llvm/tools/hpvm/llvm_patches/lib/AsmParser/LLToken.h
new file mode 100644
index 0000000000..0c05d51544
--- /dev/null
+++ b/llvm/tools/hpvm/llvm_patches/lib/AsmParser/LLToken.h
@@ -0,0 +1,371 @@
+//===- LLToken.h - Token Codes for LLVM Assembly Files ----------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the enums for the .ll lexer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_ASMPARSER_LLTOKEN_H
+#define LLVM_LIB_ASMPARSER_LLTOKEN_H
+
+namespace llvm {
+namespace lltok {
+enum Kind {
+  // Markers
+  Eof,
+  Error,
+
+  // Tokens with no info.
+  dotdotdot, // ...
+  equal,
+  comma, // =  ,
+  star,  // *
+  lsquare,
+  rsquare, // [  ]
+  lbrace,
+  rbrace, // {  }
+  less,
+  greater, // <  >
+  lparen,
+  rparen,  // (  )
+  exclaim, // !
+  bar,     // |
+
+  kw_x,
+  kw_true,
+  kw_false,
+  kw_declare,
+  kw_define,
+  kw_global,
+  kw_constant,
+
+  kw_private,
+  kw_internal,
+  kw_linkonce,
+  kw_linkonce_odr,
+  kw_weak, // Used as a linkage, and a modifier for "cmpxchg".
+  kw_weak_odr,
+  kw_appending,
+  kw_dllimport,
+  kw_dllexport,
+  kw_common,
+  kw_available_externally,
+  kw_default,
+  kw_hidden,
+  kw_protected,
+  kw_unnamed_addr,
+  kw_local_unnamed_addr,
+  kw_externally_initialized,
+  kw_extern_weak,
+  kw_external,
+  kw_thread_local,
+  kw_localdynamic,
+  kw_initialexec,
+  kw_localexec,
+  kw_zeroinitializer,
+  kw_undef,
+  kw_null,
+  kw_none,
+  kw_to,
+  kw_caller,
+  kw_within,
+  kw_from,
+  kw_tail,
+  kw_musttail,
+  kw_notail,
+  kw_target,
+  kw_triple,
+  kw_source_filename,
+  kw_unwind,
+  kw_deplibs, // FIXME: Remove in 4.0
+  kw_datalayout,
+  kw_volatile,
+  kw_atomic,
+  kw_unordered,
+  kw_monotonic,
+  kw_acquire,
+  kw_release,
+  kw_acq_rel,
+  kw_seq_cst,
+  kw_singlethread,
+  kw_nnan,
+  kw_ninf,
+  kw_nsz,
+  kw_arcp,
+  kw_fast,
+  kw_nuw,
+  kw_nsw,
+  kw_exact,
+  kw_inbounds,
+  kw_inrange,
+  kw_align,
+  kw_addrspace,
+  kw_section,
+  kw_alias,
+  kw_ifunc,
+  kw_module,
+  kw_asm,
+  kw_sideeffect,
+  kw_alignstack,
+  kw_inteldialect,
+  kw_gc,
+  kw_prefix,
+  kw_prologue,
+  kw_c,
+
+  kw_cc,
+  kw_ccc,
+  kw_fastcc,
+  kw_coldcc,
+  kw_intel_ocl_bicc,
+  kw_x86_stdcallcc,
+  kw_x86_fastcallcc,
+  kw_x86_thiscallcc,
+  kw_x86_vectorcallcc,
+  kw_x86_regcallcc,
+  kw_arm_apcscc,
+  kw_arm_aapcscc,
+  kw_arm_aapcs_vfpcc,
+  kw_msp430_intrcc,
+  kw_avr_intrcc,
+  kw_avr_signalcc,
+  kw_ptx_kernel,
+  kw_ptx_device,
+  kw_spir_kernel,
+  kw_spir_func,
+  kw_x86_64_sysvcc,
+  kw_x86_64_win64cc,
+  kw_webkit_jscc,
+  kw_anyregcc,
+  kw_swiftcc,
+  kw_preserve_mostcc,
+  kw_preserve_allcc,
+  kw_ghccc,
+  kw_x86_intrcc,
+  kw_hhvmcc,
+  kw_hhvm_ccc,
+  kw_cxx_fast_tlscc,
+  kw_amdgpu_vs,
+  kw_amdgpu_gs,
+  kw_amdgpu_ps,
+  kw_amdgpu_cs,
+  kw_amdgpu_kernel,
+
+  // Attributes:
+  kw_attributes,
+  kw_allocsize,
+  kw_alwaysinline,
+  kw_argmemonly,
+  kw_sanitize_address,
+  kw_builtin,
+  kw_byval,
+  kw_inalloca,
+  kw_cold,
+  kw_convergent,
+  kw_dereferenceable,
+  kw_dereferenceable_or_null,
+  kw_inaccessiblememonly,
+  kw_inaccessiblemem_or_argmemonly,
+  kw_inlinehint,
+  kw_inreg,
+  kw_jumptable,
+  kw_minsize,
+  kw_naked,
+  kw_nest,
+  kw_noalias,
+  kw_nobuiltin,
+  kw_nocapture,
+  kw_noduplicate,
+  kw_noimplicitfloat,
+  kw_noinline,
+  kw_norecurse,
+  kw_nonlazybind,
+  kw_nonnull,
+  kw_noredzone,
+  kw_noreturn,
+  kw_nounwind,
+  kw_optnone,
+  kw_optsize,
+  kw_readnone,
+  kw_readonly,
+  kw_returned,
+  kw_returns_twice,
+  kw_signext,
+  kw_ssp,
+  kw_sspreq,
+  kw_sspstrong,
+  kw_safestack,
+  kw_sret,
+  kw_sanitize_thread,
+  kw_sanitize_memory,
+  kw_swifterror,
+  kw_swiftself,
+  kw_uwtable,
+  kw_writeonly,
+  kw_zeroext,
+  // VISC parameter attributes
+  kw_in,
+  kw_out,
+  kw_inout,
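+  // Illustrative sketch only (HPVM/VISC-specific, not upstream LLVM): as
+  // parameter attributes these tokens would appear between a parameter's type
+  // and its name in textual IR, e.g. with placeholder names:
+  //   define void @hpvm_node(i32* in %src, i32* out %dst, i32* inout %buf)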
+
+  kw_type,
+  kw_opaque,
+
+  kw_comdat,
+
+  // Comdat types
+  kw_any,
+  kw_exactmatch,
+  kw_largest,
+  kw_noduplicates,
+  kw_samesize,
+
+  kw_eq,
+  kw_ne,
+  kw_slt,
+  kw_sgt,
+  kw_sle,
+  kw_sge,
+  kw_ult,
+  kw_ugt,
+  kw_ule,
+  kw_uge,
+  kw_oeq,
+  kw_one,
+  kw_olt,
+  kw_ogt,
+  kw_ole,
+  kw_oge,
+  kw_ord,
+  kw_uno,
+  kw_ueq,
+  kw_une,
+
+  // atomicrmw operations that aren't also instruction keywords.
+  kw_xchg,
+  kw_nand,
+  kw_max,
+  kw_min,
+  kw_umax,
+  kw_umin,
+
+  // Instruction Opcodes (Opcode in UIntVal).
+  kw_add,
+  kw_fadd,
+  kw_sub,
+  kw_fsub,
+  kw_mul,
+  kw_fmul,
+  kw_udiv,
+  kw_sdiv,
+  kw_fdiv,
+  kw_urem,
+  kw_srem,
+  kw_frem,
+  kw_shl,
+  kw_lshr,
+  kw_ashr,
+  kw_and,
+  kw_or,
+  kw_xor,
+  kw_icmp,
+  kw_fcmp,
+
+  kw_phi,
+  kw_call,
+  kw_trunc,
+  kw_zext,
+  kw_sext,
+  kw_fptrunc,
+  kw_fpext,
+  kw_uitofp,
+  kw_sitofp,
+  kw_fptoui,
+  kw_fptosi,
+  kw_inttoptr,
+  kw_ptrtoint,
+  kw_bitcast,
+  kw_addrspacecast,
+  kw_select,
+  kw_va_arg,
+
+  kw_landingpad,
+  kw_personality,
+  kw_cleanup,
+  kw_catch,
+  kw_filter,
+
+  kw_ret,
+  kw_br,
+  kw_switch,
+  kw_indirectbr,
+  kw_invoke,
+  kw_resume,
+  kw_unreachable,
+  kw_cleanupret,
+  kw_catchswitch,
+  kw_catchret,
+  kw_catchpad,
+  kw_cleanuppad,
+
+  kw_alloca,
+  kw_load,
+  kw_store,
+  kw_fence,
+  kw_cmpxchg,
+  kw_atomicrmw,
+  kw_getelementptr,
+
+  kw_extractelement,
+  kw_insertelement,
+  kw_shufflevector,
+  kw_extractvalue,
+  kw_insertvalue,
+  kw_blockaddress,
+
+  // Metadata types.
+  kw_distinct,
+
+  // Use-list order directives.
+  kw_uselistorder,
+  kw_uselistorder_bb,
+
+  // Unsigned Valued tokens (UIntVal).
+  GlobalID,   // @42
+  LocalVarID, // %42
+  AttrGrpID,  // #42
+
+  // String valued tokens (StrVal).
+  LabelStr,         // foo:
+  GlobalVar,        // @foo @"foo"
+  ComdatVar,        // $foo
+  LocalVar,         // %foo %"foo"
+  MetadataVar,      // !foo
+  StringConstant,   // "foo"
+  DwarfTag,         // DW_TAG_foo
+  DwarfAttEncoding, // DW_ATE_foo
+  DwarfVirtuality,  // DW_VIRTUALITY_foo
+  DwarfLang,        // DW_LANG_foo
+  DwarfCC,          // DW_CC_foo
+  EmissionKind,     // lineTablesOnly
+  DwarfOp,          // DW_OP_foo
+  DIFlag,           // DIFlagFoo
+  DwarfMacinfo,     // DW_MACINFO_foo
+  ChecksumKind,     // CSK_foo
+
+  // Type valued tokens (TyVal).
+  Type,
+
+  APFloat, // APFloatVal
+  APSInt   // APSInt
+};
+} // end namespace lltok
+} // end namespace llvm
+
+#endif
diff --git a/llvm/tools/hpvm/llvm_patches/lib/AsmParser/LLToken.h.patch b/llvm/tools/hpvm/llvm_patches/lib/AsmParser/LLToken.h.patch
new file mode 100644
index 0000000000..fc3b2e05cf
--- /dev/null
+++ b/llvm/tools/hpvm/llvm_patches/lib/AsmParser/LLToken.h.patch
@@ -0,0 +1,13 @@
+--- ../../../lib/AsmParser/LLToken.h	2019-12-29 18:23:35.468919129 -0600
++++ lib/AsmParser/LLToken.h	2019-12-29 18:44:52.957512419 -0600
+@@ -209,6 +209,10 @@
+   kw_uwtable,
+   kw_writeonly,
+   kw_zeroext,
++  // VISC parameter attributes
++  kw_in,
++  kw_out,
++  kw_inout,
+ 
+   kw_type,
+   kw_opaque,
diff --git a/llvm/tools/hpvm/llvm_patches/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/tools/hpvm/llvm_patches/lib/Bitcode/Reader/BitcodeReader.cpp
new file mode 100644
index 0000000000..ba22f47cd7
--- /dev/null
+++ b/llvm/tools/hpvm/llvm_patches/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -0,0 +1,5408 @@
+//===- BitcodeReader.cpp - Internal BitcodeReader implementation ----------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Bitcode/BitcodeReader.h"
+#include "MetadataLoader.h"
+#include "ValueList.h"
+
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Bitcode/BitstreamReader.h"
+#include "llvm/Bitcode/LLVMBitCodes.h"
+#include "llvm/IR/Argument.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/AutoUpgrade.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Comdat.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/DiagnosticPrinter.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalIFunc.h"
+#include "llvm/IR/GlobalIndirectSymbol.h"
+#include "llvm/IR/GlobalObject.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/GVMaterializer.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/ModuleSummaryIndex.h"
+#include "llvm/IR/OperandTraits.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/TrackingMDRef.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/IR/Verifier.h"
+#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <deque>
+#include <limits>
+#include <map>
+#include <memory>
+#include <string>
+#include <system_error>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+using namespace llvm;
+
+static cl::opt<bool> PrintSummaryGUIDs(
+    "print-summary-global-ids", cl::init(false), cl::Hidden,
+    cl::desc(
+        "Print the global id for each value when reading the module summary"));
+
+namespace {
+
+enum {
+  SWITCH_INST_MAGIC = 0x4B5 // May 2012 => 1205 => Hex
+};
+
+Error error(const Twine &Message) {
+  return make_error<StringError>(
+      Message, make_error_code(BitcodeError::CorruptedBitcode));
+}
+
+/// Helper to read the header common to all bitcode files.
+bool hasValidBitcodeHeader(BitstreamCursor &Stream) {
+  // Sniff for the signature.
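+  // A raw bitcode stream begins with the bytes 'B', 'C' followed by the
+  // nibbles 0x0, 0xC, 0xE, 0xD (i.e. 0xC0DE).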
+  if (!Stream.canSkipToPos(4) ||
+      Stream.Read(8) != 'B' ||
+      Stream.Read(8) != 'C' ||
+      Stream.Read(4) != 0x0 ||
+      Stream.Read(4) != 0xC ||
+      Stream.Read(4) != 0xE ||
+      Stream.Read(4) != 0xD)
+    return false;
+  return true;
+}
+
+Expected<BitstreamCursor> initStream(MemoryBufferRef Buffer) {
+  const unsigned char *BufPtr = (const unsigned char *)Buffer.getBufferStart();
+  const unsigned char *BufEnd = BufPtr + Buffer.getBufferSize();
+
+  if (Buffer.getBufferSize() & 3)
+    return error("Invalid bitcode signature");
+
+  // If we have a wrapper header, parse it and ignore the non-bc file contents.
+  // The magic number is 0x0B17C0DE stored in little endian.
+  if (isBitcodeWrapper(BufPtr, BufEnd))
+    if (SkipBitcodeWrapperHeader(BufPtr, BufEnd, true))
+      return error("Invalid bitcode wrapper header");
+
+  BitstreamCursor Stream(ArrayRef<uint8_t>(BufPtr, BufEnd));
+  if (!hasValidBitcodeHeader(Stream))
+    return error("Invalid bitcode signature");
+
+  return std::move(Stream);
+}
+
+/// Convert a string from a record into an std::string, return true on failure.
+template <typename StrTy>
+static bool convertToString(ArrayRef<uint64_t> Record, unsigned Idx,
+                            StrTy &Result) {
+  if (Idx > Record.size())
+    return true;
+
+  for (unsigned i = Idx, e = Record.size(); i != e; ++i)
+    Result += (char)Record[i];
+  return false;
+}
+
+// Strip all the TBAA attachments from the module.
+void stripTBAA(Module *M) {
+  for (auto &F : *M) {
+    if (F.isMaterializable())
+      continue;
+    for (auto &I : instructions(F))
+      I.setMetadata(LLVMContext::MD_tbaa, nullptr);
+  }
+}
+
+/// Read the "IDENTIFICATION_BLOCK_ID" block, do some basic enforcement on the
+/// "epoch" encoded in the bitcode, and return the producer name if any.
+Expected<std::string> readIdentificationBlock(BitstreamCursor &Stream) {
+  if (Stream.EnterSubBlock(bitc::IDENTIFICATION_BLOCK_ID))
+    return error("Invalid record");
+
+  // Read all the records.
+  SmallVector<uint64_t, 64> Record;
+
+  std::string ProducerIdentification;
+
+  while (true) {
+    BitstreamEntry Entry = Stream.advance();
+
+    switch (Entry.Kind) {
+    default:
+    case BitstreamEntry::Error:
+      return error("Malformed block");
+    case BitstreamEntry::EndBlock:
+      return ProducerIdentification;
+    case BitstreamEntry::Record:
+      // The interesting case.
+      break;
+    }
+
+    // Read a record.
+    Record.clear();
+    unsigned BitCode = Stream.readRecord(Entry.ID, Record);
+    switch (BitCode) {
+    default: // Default behavior: reject
+      return error("Invalid value");
+    case bitc::IDENTIFICATION_CODE_STRING: // IDENTIFICATION: [strchr x N]
+      convertToString(Record, 0, ProducerIdentification);
+      break;
+    case bitc::IDENTIFICATION_CODE_EPOCH: { // EPOCH: [epoch#]
+      unsigned epoch = (unsigned)Record[0];
+      if (epoch != bitc::BITCODE_CURRENT_EPOCH) {
+        return error(
+          Twine("Incompatible epoch: Bitcode '") + Twine(epoch) +
+          "' vs current: '" + Twine(bitc::BITCODE_CURRENT_EPOCH) + "'");
+      }
+    }
+    }
+  }
+}
+
+Expected<std::string> readIdentificationCode(BitstreamCursor &Stream) {
+  // We expect a number of well-defined blocks, though we don't necessarily
+  // need to understand them all.
+  while (true) {
+    if (Stream.AtEndOfStream())
+      return "";
+
+    BitstreamEntry Entry = Stream.advance();
+    switch (Entry.Kind) {
+    case BitstreamEntry::EndBlock:
+    case BitstreamEntry::Error:
+      return error("Malformed block");
+
+    case BitstreamEntry::SubBlock:
+      if (Entry.ID == bitc::IDENTIFICATION_BLOCK_ID)
+        return readIdentificationBlock(Stream);
+
+      // Ignore other sub-blocks.
+      if (Stream.SkipBlock())
+        return error("Malformed block");
+      continue;
+    case BitstreamEntry::Record:
+      Stream.skipRecord(Entry.ID);
+      continue;
+    }
+  }
+}
+
+Expected<bool> hasObjCCategoryInModule(BitstreamCursor &Stream) {
+  if (Stream.EnterSubBlock(bitc::MODULE_BLOCK_ID))
+    return error("Invalid record");
+
+  SmallVector<uint64_t, 64> Record;
+  // Read all the records for this module.
+
+  while (true) {
+    BitstreamEntry Entry = Stream.advanceSkippingSubblocks();
+
+    switch (Entry.Kind) {
+    case BitstreamEntry::SubBlock: // Handled for us already.
+    case BitstreamEntry::Error:
+      return error("Malformed block");
+    case BitstreamEntry::EndBlock:
+      return false;
+    case BitstreamEntry::Record:
+      // The interesting case.
+      break;
+    }
+
+    // Read a record.
+    switch (Stream.readRecord(Entry.ID, Record)) {
+    default:
+      break; // Default behavior, ignore unknown content.
+    case bitc::MODULE_CODE_SECTIONNAME: { // SECTIONNAME: [strchr x N]
+      std::string S;
+      if (convertToString(Record, 0, S))
+        return error("Invalid record");
+      // Check for the i386 and other (x86_64, ARM) conventions
+      if (S.find("__DATA, __objc_catlist") != std::string::npos ||
+          S.find("__OBJC,__category") != std::string::npos)
+        return true;
+      break;
+    }
+    }
+    Record.clear();
+  }
+  llvm_unreachable("Exit infinite loop");
+}
+
+Expected<bool> hasObjCCategory(BitstreamCursor &Stream) {
+  // We expect a number of well-defined blocks, though we don't necessarily
+  // need to understand them all.
+  while (true) {
+    BitstreamEntry Entry = Stream.advance();
+
+    switch (Entry.Kind) {
+    case BitstreamEntry::Error:
+      return error("Malformed block");
+    case BitstreamEntry::EndBlock:
+      return false;
+
+    case BitstreamEntry::SubBlock:
+      if (Entry.ID == bitc::MODULE_BLOCK_ID)
+        return hasObjCCategoryInModule(Stream);
+
+      // Ignore other sub-blocks.
+      if (Stream.SkipBlock())
+        return error("Malformed block");
+      continue;
+
+    case BitstreamEntry::Record:
+      Stream.skipRecord(Entry.ID);
+      continue;
+    }
+  }
+}
+
+Expected<std::string> readModuleTriple(BitstreamCursor &Stream) {
+  if (Stream.EnterSubBlock(bitc::MODULE_BLOCK_ID))
+    return error("Invalid record");
+
+  SmallVector<uint64_t, 64> Record;
+
+  std::string Triple;
+
+  // Read all the records for this module.
+  while (true) {
+    BitstreamEntry Entry = Stream.advanceSkippingSubblocks();
+
+    switch (Entry.Kind) {
+    case BitstreamEntry::SubBlock: // Handled for us already.
+    case BitstreamEntry::Error:
+      return error("Malformed block");
+    case BitstreamEntry::EndBlock:
+      return Triple;
+    case BitstreamEntry::Record:
+      // The interesting case.
+      break;
+    }
+
+    // Read a record.
+    switch (Stream.readRecord(Entry.ID, Record)) {
+    default: break;  // Default behavior, ignore unknown content.
+    case bitc::MODULE_CODE_TRIPLE: {  // TRIPLE: [strchr x N]
+      std::string S;
+      if (convertToString(Record, 0, S))
+        return error("Invalid record");
+      Triple = S;
+      break;
+    }
+    }
+    Record.clear();
+  }
+  llvm_unreachable("Exit infinite loop");
+}
+
+Expected<std::string> readTriple(BitstreamCursor &Stream) {
+  // We expect a number of well-defined blocks, though we don't necessarily
+  // need to understand them all.
+  while (true) {
+    BitstreamEntry Entry = Stream.advance();
+
+    switch (Entry.Kind) {
+    case BitstreamEntry::Error:
+      return error("Malformed block");
+    case BitstreamEntry::EndBlock:
+      return "";
+
+    case BitstreamEntry::SubBlock:
+      if (Entry.ID == bitc::MODULE_BLOCK_ID)
+        return readModuleTriple(Stream);
+
+      // Ignore other sub-blocks.
+      if (Stream.SkipBlock())
+        return error("Malformed block");
+      continue;
+
+    case BitstreamEntry::Record:
+      Stream.skipRecord(Entry.ID);
+      continue;
+    }
+  }
+}
+
+class BitcodeReaderBase {
+protected:
+  BitcodeReaderBase(BitstreamCursor Stream) : Stream(std::move(Stream)) {
+    this->Stream.setBlockInfo(&BlockInfo);
+  }
+
+  BitstreamBlockInfo BlockInfo;
+  BitstreamCursor Stream;
+
+  bool readBlockInfo();
+
+  // Contains an arbitrary and optional string identifying the bitcode producer
+  std::string ProducerIdentification;
+
+  Error error(const Twine &Message);
+};
+
+Error BitcodeReaderBase::error(const Twine &Message) {
+  std::string FullMsg = Message.str();
+  if (!ProducerIdentification.empty())
+    FullMsg += " (Producer: '" + ProducerIdentification + "' Reader: 'LLVM " +
+               LLVM_VERSION_STRING "')";
+  return ::error(FullMsg);
+}
+
+class BitcodeReader : public BitcodeReaderBase, public GVMaterializer {
+  LLVMContext &Context;
+  Module *TheModule = nullptr;
+  // Next offset to start scanning for lazy parsing of function bodies.
+  uint64_t NextUnreadBit = 0;
+  // Last function offset found in the VST.
+  uint64_t LastFunctionBlockBit = 0;
+  bool SeenValueSymbolTable = false;
+  uint64_t VSTOffset = 0;
+
+  std::vector<Type*> TypeList;
+  BitcodeReaderValueList ValueList;
+  Optional<MetadataLoader> MDLoader;
+  std::vector<Comdat *> ComdatList;
+  SmallVector<Instruction *, 64> InstructionList;
+
+  std::vector<std::pair<GlobalVariable*, unsigned> > GlobalInits;
+  std::vector<std::pair<GlobalIndirectSymbol*, unsigned> > IndirectSymbolInits;
+  std::vector<std::pair<Function*, unsigned> > FunctionPrefixes;
+  std::vector<std::pair<Function*, unsigned> > FunctionPrologues;
+  std::vector<std::pair<Function*, unsigned> > FunctionPersonalityFns;
+
+  /// The set of attributes by index.  Index zero in the file is for null, and
+  /// is thus not represented here.  As such all indices are off by one.
+  std::vector<AttributeSet> MAttributes;
+
+  /// The set of attribute groups.
+  std::map<unsigned, AttributeSet> MAttributeGroups;
+
+  /// While parsing a function body, this is a list of the basic blocks for the
+  /// function.
+  std::vector<BasicBlock*> FunctionBBs;
+
+  // When reading the module header, this list is populated with functions that
+  // have bodies later in the file.
+  std::vector<Function*> FunctionsWithBodies;
+
+  // When intrinsic functions that require upgrading are encountered, they are
+  // stored here with their replacement function.
+  typedef DenseMap<Function*, Function*> UpdatedIntrinsicMap;
+  UpdatedIntrinsicMap UpgradedIntrinsics;
+  // Intrinsics which were remangled because of a type rename.
+  UpdatedIntrinsicMap RemangledIntrinsics;
+
+  // Several operations happen after the module header has been read, but
+  // before function bodies are processed. This keeps track of whether
+  // we've done this yet.
+  bool SeenFirstFunctionBody = false;
+
+  /// When function bodies are initially scanned, this map contains info about
+  /// where to find each deferred function body in the stream.
+  DenseMap<Function*, uint64_t> DeferredFunctionInfo;
+
+  /// When the Metadata block is initially scanned while parsing the module, we may
+  /// choose to defer parsing of the metadata. This vector contains info about
+  /// which Metadata blocks are deferred.
+  std::vector<uint64_t> DeferredMetadataInfo;
+
+  /// These are basic blocks forward-referenced by block addresses.  They are
+  /// inserted lazily into functions when they're loaded.  The basic block ID is
+  /// its index into the vector.
+  DenseMap<Function *, std::vector<BasicBlock *>> BasicBlockFwdRefs;
+  std::deque<Function *> BasicBlockFwdRefQueue;
+
+  /// Indicates that we are using a new encoding for instruction operands where
+  /// most operands in the current FUNCTION_BLOCK are encoded relative to the
+  /// instruction number, for a more compact encoding.  Some instruction
+  /// operands are not relative to the instruction ID: basic block numbers, and
+  /// types. Once the old-style function blocks have been phased out, this flag
+  /// will no longer be needed.
+  bool UseRelativeIDs = false;
+
+  /// True if all functions will be materialized, negating the need to process
+  /// (e.g.) blockaddress forward references.
+  bool WillMaterializeAllForwardRefs = false;
+
+  bool StripDebugInfo = false;
+  TBAAVerifier TBAAVerifyHelper;
+
+  std::vector<std::string> BundleTags;
+
+public:
+  BitcodeReader(BitstreamCursor Stream, StringRef ProducerIdentification,
+                LLVMContext &Context);
+
+  Error materializeForwardReferencedFunctions();
+
+  Error materialize(GlobalValue *GV) override;
+  Error materializeModule() override;
+  std::vector<StructType *> getIdentifiedStructTypes() const override;
+
+  /// \brief Main interface to parsing a bitcode buffer.
+  /// \returns true if an error occurred.
+  Error parseBitcodeInto(Module *M, bool ShouldLazyLoadMetadata = false,
+                         bool IsImporting = false);
+
+  static uint64_t decodeSignRotatedValue(uint64_t V);
+
+  /// Materialize any deferred Metadata block.
+  Error materializeMetadata() override;
+
+  void setStripDebugInfo() override;
+
+private:
+  std::vector<StructType *> IdentifiedStructTypes;
+  StructType *createIdentifiedStructType(LLVMContext &Context, StringRef Name);
+  StructType *createIdentifiedStructType(LLVMContext &Context);
+
+  Type *getTypeByID(unsigned ID);
+
+  Value *getFnValueByID(unsigned ID, Type *Ty) {
+    if (Ty && Ty->isMetadataTy())
+      return MetadataAsValue::get(Ty->getContext(), getFnMetadataByID(ID));
+    return ValueList.getValueFwdRef(ID, Ty);
+  }
+
+  Metadata *getFnMetadataByID(unsigned ID) {
+    return MDLoader->getMetadataFwdRefOrLoad(ID);
+  }
+
+  BasicBlock *getBasicBlock(unsigned ID) const {
+    if (ID >= FunctionBBs.size()) return nullptr; // Invalid ID
+    return FunctionBBs[ID];
+  }
+
+  AttributeSet getAttributes(unsigned i) const {
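+    // Index 0 denotes the empty attribute set; for i == 0 the unsigned wrap of
+    // i-1 makes the check below fail, so the default AttributeSet is returned.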
+    if (i-1 < MAttributes.size())
+      return MAttributes[i-1];
+    return AttributeSet();
+  }
+
+  /// Read a value/type pair out of the specified record from slot 'Slot'.
+  /// Increment Slot past the number of slots used in the record. Return true on
+  /// failure.
+  bool getValueTypePair(SmallVectorImpl<uint64_t> &Record, unsigned &Slot,
+                        unsigned InstNum, Value *&ResVal) {
+    if (Slot == Record.size()) return true;
+    unsigned ValNo = (unsigned)Record[Slot++];
+    // Adjust the ValNo, if it was encoded relative to the InstNum.
+    if (UseRelativeIDs)
+      ValNo = InstNum - ValNo;
+    if (ValNo < InstNum) {
+      // If this is not a forward reference, just return the value we already
+      // have.
+      ResVal = getFnValueByID(ValNo, nullptr);
+      return ResVal == nullptr;
+    }
+    if (Slot == Record.size())
+      return true;
+
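+    // This is a forward reference, so the record also carries an explicit type
+    // ID in the next slot.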
+    unsigned TypeNo = (unsigned)Record[Slot++];
+    ResVal = getFnValueByID(ValNo, getTypeByID(TypeNo));
+    return ResVal == nullptr;
+  }
+
+  /// Read a value out of the specified record from slot 'Slot'. Increment Slot
+  /// past the number of slots used by the value in the record. Return true if
+  /// there is an error.
+  bool popValue(SmallVectorImpl<uint64_t> &Record, unsigned &Slot,
+                unsigned InstNum, Type *Ty, Value *&ResVal) {
+    if (getValue(Record, Slot, InstNum, Ty, ResVal))
+      return true;
+    // All values currently take a single record slot.
+    ++Slot;
+    return false;
+  }
+
+  /// Like popValue, but does not increment the Slot number.
+  bool getValue(SmallVectorImpl<uint64_t> &Record, unsigned Slot,
+                unsigned InstNum, Type *Ty, Value *&ResVal) {
+    ResVal = getValue(Record, Slot, InstNum, Ty);
+    return ResVal == nullptr;
+  }
+
+  /// Version of getValue that returns ResVal directly, or 0 if there is an
+  /// error.
+  Value *getValue(SmallVectorImpl<uint64_t> &Record, unsigned Slot,
+                  unsigned InstNum, Type *Ty) {
+    if (Slot == Record.size()) return nullptr;
+    unsigned ValNo = (unsigned)Record[Slot];
+    // Adjust the ValNo, if it was encoded relative to the InstNum.
+    if (UseRelativeIDs)
+      ValNo = InstNum - ValNo;
+    return getFnValueByID(ValNo, Ty);
+  }
+
+  /// Like getValue, but decodes signed VBRs.
+  Value *getValueSigned(SmallVectorImpl<uint64_t> &Record, unsigned Slot,
+                        unsigned InstNum, Type *Ty) {
+    if (Slot == Record.size()) return nullptr;
+    unsigned ValNo = (unsigned)decodeSignRotatedValue(Record[Slot]);
+    // Adjust the ValNo, if it was encoded relative to the InstNum.
+    if (UseRelativeIDs)
+      ValNo = InstNum - ValNo;
+    return getFnValueByID(ValNo, Ty);
+  }
+
+  /// Converts alignment exponent (i.e. power of two (or zero)) to the
+  /// corresponding alignment to use. If alignment is too large, returns
+  /// a corresponding error code.
+  Error parseAlignmentValue(uint64_t Exponent, unsigned &Alignment);
+  Error parseAttrKind(uint64_t Code, Attribute::AttrKind *Kind);
+  Error parseModule(uint64_t ResumeBit, bool ShouldLazyLoadMetadata = false);
+  Error parseAttributeBlock();
+  Error parseAttributeGroupBlock();
+  Error parseTypeTable();
+  Error parseTypeTableBody();
+  Error parseOperandBundleTags();
+
+  Expected<Value *> recordValue(SmallVectorImpl<uint64_t> &Record,
+                                unsigned NameIndex, Triple &TT);
+  Error parseValueSymbolTable(uint64_t Offset = 0);
+  Error parseConstants();
+  Error rememberAndSkipFunctionBodies();
+  Error rememberAndSkipFunctionBody();
+  /// Save the positions of the Metadata blocks and skip parsing the blocks.
+  Error rememberAndSkipMetadata();
+  Error typeCheckLoadStoreInst(Type *ValType, Type *PtrType);
+  Error parseFunctionBody(Function *F);
+  Error globalCleanup();
+  Error resolveGlobalAndIndirectSymbolInits();
+  Error parseUseLists();
+  Error findFunctionInStream(
+      Function *F,
+      DenseMap<Function *, uint64_t>::iterator DeferredFunctionInfoIterator);
+};
+
+/// Class to manage reading and parsing function summary index bitcode
+/// files/sections.
+class ModuleSummaryIndexBitcodeReader : public BitcodeReaderBase {
+  /// The module index built during parsing.
+  ModuleSummaryIndex &TheIndex;
+
+  /// Indicates whether we have encountered a global value summary section
+  /// yet during parsing.
+  bool SeenGlobalValSummary = false;
+
+  /// Indicates whether we have already parsed the VST, used for error checking.
+  bool SeenValueSymbolTable = false;
+
+  /// Set to the offset of the VST recorded in the MODULE_CODE_VSTOFFSET record.
+  /// Used to enable on-demand parsing of the VST.
+  uint64_t VSTOffset = 0;
+
+  // Map to save ValueId to GUID association that was recorded in the
+  // ValueSymbolTable. It is used after the VST is parsed to convert
+  // call graph edges read from the function summary from referencing
+  // callees by their ValueId to using the GUID instead, which is how
+  // they are recorded in the summary index being built.
+  // We save a second GUID which is the same as the first one, but ignoring the
+  // linkage, i.e. for values without local linkage the two are identical.
+  DenseMap<unsigned, std::pair<GlobalValue::GUID, GlobalValue::GUID>>
+      ValueIdToCallGraphGUIDMap;
+
+  /// Map populated during module path string table parsing, from the
+  /// module ID to a string reference owned by the index's module
+  /// path string table, used to correlate with combined index
+  /// summary records.
+  DenseMap<uint64_t, StringRef> ModuleIdMap;
+
+  /// Original source file name recorded in a bitcode record.
+  std::string SourceFileName;
+
+public:
+  ModuleSummaryIndexBitcodeReader(
+      BitstreamCursor Stream, ModuleSummaryIndex &TheIndex);
+
+  Error parseModule(StringRef ModulePath);
+
+private:
+  Error parseValueSymbolTable(
+      uint64_t Offset,
+      DenseMap<unsigned, GlobalValue::LinkageTypes> &ValueIdToLinkageMap);
+  std::vector<ValueInfo> makeRefList(ArrayRef<uint64_t> Record);
+  std::vector<FunctionSummary::EdgeTy> makeCallList(ArrayRef<uint64_t> Record,
+                                                    bool IsOldProfileFormat,
+                                                    bool HasProfile);
+  Error parseEntireSummary(StringRef ModulePath);
+  Error parseModuleStringTable();
+
+  std::pair<GlobalValue::GUID, GlobalValue::GUID>
+  getGUIDFromValueId(unsigned ValueId);
+};
+
+} // end anonymous namespace
+
+std::error_code llvm::errorToErrorCodeAndEmitErrors(LLVMContext &Ctx,
+                                                    Error Err) {
+  if (Err) {
+    std::error_code EC;
+    handleAllErrors(std::move(Err), [&](ErrorInfoBase &EIB) {
+      EC = EIB.convertToErrorCode();
+      Ctx.emitError(EIB.message());
+    });
+    return EC;
+  }
+  return std::error_code();
+}
+
+BitcodeReader::BitcodeReader(BitstreamCursor Stream,
+                             StringRef ProducerIdentification,
+                             LLVMContext &Context)
+    : BitcodeReaderBase(std::move(Stream)), Context(Context),
+      ValueList(Context) {
+  this->ProducerIdentification = ProducerIdentification;
+}
+
+Error BitcodeReader::materializeForwardReferencedFunctions() {
+  if (WillMaterializeAllForwardRefs)
+    return Error::success();
+
+  // Prevent recursion.
+  WillMaterializeAllForwardRefs = true;
+
+  while (!BasicBlockFwdRefQueue.empty()) {
+    Function *F = BasicBlockFwdRefQueue.front();
+    BasicBlockFwdRefQueue.pop_front();
+    assert(F && "Expected valid function");
+    if (!BasicBlockFwdRefs.count(F))
+      // Already materialized.
+      continue;
+
+    // Check for a function that isn't materializable to prevent an infinite
+    // loop.  When parsing a blockaddress stored in a global variable, there
+    // isn't a trivial way to check if a function will have a body without a
+    // linear search through FunctionsWithBodies, so just check it here.
+    if (!F->isMaterializable())
+      return error("Never resolved function from blockaddress");
+
+    // Try to materialize F.
+    if (Error Err = materialize(F))
+      return Err;
+  }
+  assert(BasicBlockFwdRefs.empty() && "Function missing from queue");
+
+  // Reset state.
+  WillMaterializeAllForwardRefs = false;
+  return Error::success();
+}
+
+//===----------------------------------------------------------------------===//
+//  Helper functions to implement forward reference resolution, etc.
+//===----------------------------------------------------------------------===//
+
+static bool hasImplicitComdat(size_t Val) {
+  switch (Val) {
+  default:
+    return false;
+  case 1:  // Old WeakAnyLinkage
+  case 4:  // Old LinkOnceAnyLinkage
+  case 10: // Old WeakODRLinkage
+  case 11: // Old LinkOnceODRLinkage
+    return true;
+  }
+}
+
+static GlobalValue::LinkageTypes getDecodedLinkage(unsigned Val) {
+  switch (Val) {
+  default: // Map unknown/new linkages to external
+  case 0:
+    return GlobalValue::ExternalLinkage;
+  case 2:
+    return GlobalValue::AppendingLinkage;
+  case 3:
+    return GlobalValue::InternalLinkage;
+  case 5:
+    return GlobalValue::ExternalLinkage; // Obsolete DLLImportLinkage
+  case 6:
+    return GlobalValue::ExternalLinkage; // Obsolete DLLExportLinkage
+  case 7:
+    return GlobalValue::ExternalWeakLinkage;
+  case 8:
+    return GlobalValue::CommonLinkage;
+  case 9:
+    return GlobalValue::PrivateLinkage;
+  case 12:
+    return GlobalValue::AvailableExternallyLinkage;
+  case 13:
+    return GlobalValue::PrivateLinkage; // Obsolete LinkerPrivateLinkage
+  case 14:
+    return GlobalValue::PrivateLinkage; // Obsolete LinkerPrivateWeakLinkage
+  case 15:
+    return GlobalValue::ExternalLinkage; // Obsolete LinkOnceODRAutoHideLinkage
+  case 1: // Old value with implicit comdat.
+  case 16:
+    return GlobalValue::WeakAnyLinkage;
+  case 10: // Old value with implicit comdat.
+  case 17:
+    return GlobalValue::WeakODRLinkage;
+  case 4: // Old value with implicit comdat.
+  case 18:
+    return GlobalValue::LinkOnceAnyLinkage;
+  case 11: // Old value with implicit comdat.
+  case 19:
+    return GlobalValue::LinkOnceODRLinkage;
+  }
+}
+
+/// Decode the flags for GlobalValue in the summary.
+static GlobalValueSummary::GVFlags getDecodedGVSummaryFlags(uint64_t RawFlags,
+                                                            uint64_t Version) {
+  // Summaries were not emitted before LLVM 3.9, so we don't need to upgrade the
+  // linkage the way getDecodedLinkage() does above. Any future change to the
+  // linkage enum or to getDecodedLinkage() will need to be reflected here as well.
+  auto Linkage = GlobalValue::LinkageTypes(RawFlags & 0xF); // 4 bits
+  RawFlags = RawFlags >> 4;
+  bool NotEligibleToImport = (RawFlags & 0x1) || Version < 3;
+  // The LiveRoot flag wasn't introduced until version 3. For dead stripping
+  // to work correctly on earlier versions, we must conservatively treat all
+  // values as live.
+  bool LiveRoot = (RawFlags & 0x2) || Version < 3;
+  return GlobalValueSummary::GVFlags(Linkage, NotEligibleToImport, LiveRoot);
+}
+
+static GlobalValue::VisibilityTypes getDecodedVisibility(unsigned Val) {
+  switch (Val) {
+  default: // Map unknown visibilities to default.
+  case 0: return GlobalValue::DefaultVisibility;
+  case 1: return GlobalValue::HiddenVisibility;
+  case 2: return GlobalValue::ProtectedVisibility;
+  }
+}
+
+static GlobalValue::DLLStorageClassTypes
+getDecodedDLLStorageClass(unsigned Val) {
+  switch (Val) {
+  default: // Map unknown values to default.
+  case 0: return GlobalValue::DefaultStorageClass;
+  case 1: return GlobalValue::DLLImportStorageClass;
+  case 2: return GlobalValue::DLLExportStorageClass;
+  }
+}
+
+static GlobalVariable::ThreadLocalMode getDecodedThreadLocalMode(unsigned Val) {
+  switch (Val) {
+    case 0: return GlobalVariable::NotThreadLocal;
+    default: // Map unknown non-zero value to general dynamic.
+    case 1: return GlobalVariable::GeneralDynamicTLSModel;
+    case 2: return GlobalVariable::LocalDynamicTLSModel;
+    case 3: return GlobalVariable::InitialExecTLSModel;
+    case 4: return GlobalVariable::LocalExecTLSModel;
+  }
+}
+
+static GlobalVariable::UnnamedAddr getDecodedUnnamedAddrType(unsigned Val) {
+  switch (Val) {
+    default: // Map unknown to UnnamedAddr::None.
+    case 0: return GlobalVariable::UnnamedAddr::None;
+    case 1: return GlobalVariable::UnnamedAddr::Global;
+    case 2: return GlobalVariable::UnnamedAddr::Local;
+  }
+}
+
+static int getDecodedCastOpcode(unsigned Val) {
+  switch (Val) {
+  default: return -1;
+  case bitc::CAST_TRUNC   : return Instruction::Trunc;
+  case bitc::CAST_ZEXT    : return Instruction::ZExt;
+  case bitc::CAST_SEXT    : return Instruction::SExt;
+  case bitc::CAST_FPTOUI  : return Instruction::FPToUI;
+  case bitc::CAST_FPTOSI  : return Instruction::FPToSI;
+  case bitc::CAST_UITOFP  : return Instruction::UIToFP;
+  case bitc::CAST_SITOFP  : return Instruction::SIToFP;
+  case bitc::CAST_FPTRUNC : return Instruction::FPTrunc;
+  case bitc::CAST_FPEXT   : return Instruction::FPExt;
+  case bitc::CAST_PTRTOINT: return Instruction::PtrToInt;
+  case bitc::CAST_INTTOPTR: return Instruction::IntToPtr;
+  case bitc::CAST_BITCAST : return Instruction::BitCast;
+  case bitc::CAST_ADDRSPACECAST: return Instruction::AddrSpaceCast;
+  }
+}
+
+static int getDecodedBinaryOpcode(unsigned Val, Type *Ty) {
+  bool IsFP = Ty->isFPOrFPVectorTy();
+  // BinOps are only valid for int/fp or vector of int/fp types
+  if (!IsFP && !Ty->isIntOrIntVectorTy())
+    return -1;
+
+  switch (Val) {
+  default:
+    return -1;
+  case bitc::BINOP_ADD:
+    return IsFP ? Instruction::FAdd : Instruction::Add;
+  case bitc::BINOP_SUB:
+    return IsFP ? Instruction::FSub : Instruction::Sub;
+  case bitc::BINOP_MUL:
+    return IsFP ? Instruction::FMul : Instruction::Mul;
+  case bitc::BINOP_UDIV:
+    return IsFP ? -1 : Instruction::UDiv;
+  case bitc::BINOP_SDIV:
+    return IsFP ? Instruction::FDiv : Instruction::SDiv;
+  case bitc::BINOP_UREM:
+    return IsFP ? -1 : Instruction::URem;
+  case bitc::BINOP_SREM:
+    return IsFP ? Instruction::FRem : Instruction::SRem;
+  case bitc::BINOP_SHL:
+    return IsFP ? -1 : Instruction::Shl;
+  case bitc::BINOP_LSHR:
+    return IsFP ? -1 : Instruction::LShr;
+  case bitc::BINOP_ASHR:
+    return IsFP ? -1 : Instruction::AShr;
+  case bitc::BINOP_AND:
+    return IsFP ? -1 : Instruction::And;
+  case bitc::BINOP_OR:
+    return IsFP ? -1 : Instruction::Or;
+  case bitc::BINOP_XOR:
+    return IsFP ? -1 : Instruction::Xor;
+  }
+}
+
+static AtomicRMWInst::BinOp getDecodedRMWOperation(unsigned Val) {
+  switch (Val) {
+  default: return AtomicRMWInst::BAD_BINOP;
+  case bitc::RMW_XCHG: return AtomicRMWInst::Xchg;
+  case bitc::RMW_ADD: return AtomicRMWInst::Add;
+  case bitc::RMW_SUB: return AtomicRMWInst::Sub;
+  case bitc::RMW_AND: return AtomicRMWInst::And;
+  case bitc::RMW_NAND: return AtomicRMWInst::Nand;
+  case bitc::RMW_OR: return AtomicRMWInst::Or;
+  case bitc::RMW_XOR: return AtomicRMWInst::Xor;
+  case bitc::RMW_MAX: return AtomicRMWInst::Max;
+  case bitc::RMW_MIN: return AtomicRMWInst::Min;
+  case bitc::RMW_UMAX: return AtomicRMWInst::UMax;
+  case bitc::RMW_UMIN: return AtomicRMWInst::UMin;
+  }
+}
+
+static AtomicOrdering getDecodedOrdering(unsigned Val) {
+  switch (Val) {
+  case bitc::ORDERING_NOTATOMIC: return AtomicOrdering::NotAtomic;
+  case bitc::ORDERING_UNORDERED: return AtomicOrdering::Unordered;
+  case bitc::ORDERING_MONOTONIC: return AtomicOrdering::Monotonic;
+  case bitc::ORDERING_ACQUIRE: return AtomicOrdering::Acquire;
+  case bitc::ORDERING_RELEASE: return AtomicOrdering::Release;
+  case bitc::ORDERING_ACQREL: return AtomicOrdering::AcquireRelease;
+  default: // Map unknown orderings to sequentially-consistent.
+  case bitc::ORDERING_SEQCST: return AtomicOrdering::SequentiallyConsistent;
+  }
+}
+
+static SynchronizationScope getDecodedSynchScope(unsigned Val) {
+  switch (Val) {
+  case bitc::SYNCHSCOPE_SINGLETHREAD: return SingleThread;
+  default: // Map unknown scopes to cross-thread.
+  case bitc::SYNCHSCOPE_CROSSTHREAD: return CrossThread;
+  }
+}
+
+static Comdat::SelectionKind getDecodedComdatSelectionKind(unsigned Val) {
+  switch (Val) {
+  default: // Map unknown selection kinds to any.
+  case bitc::COMDAT_SELECTION_KIND_ANY:
+    return Comdat::Any;
+  case bitc::COMDAT_SELECTION_KIND_EXACT_MATCH:
+    return Comdat::ExactMatch;
+  case bitc::COMDAT_SELECTION_KIND_LARGEST:
+    return Comdat::Largest;
+  case bitc::COMDAT_SELECTION_KIND_NO_DUPLICATES:
+    return Comdat::NoDuplicates;
+  case bitc::COMDAT_SELECTION_KIND_SAME_SIZE:
+    return Comdat::SameSize;
+  }
+}
+
+static FastMathFlags getDecodedFastMathFlags(unsigned Val) {
+  FastMathFlags FMF;
+  if (0 != (Val & FastMathFlags::UnsafeAlgebra))
+    FMF.setUnsafeAlgebra();
+  if (0 != (Val & FastMathFlags::NoNaNs))
+    FMF.setNoNaNs();
+  if (0 != (Val & FastMathFlags::NoInfs))
+    FMF.setNoInfs();
+  if (0 != (Val & FastMathFlags::NoSignedZeros))
+    FMF.setNoSignedZeros();
+  if (0 != (Val & FastMathFlags::AllowReciprocal))
+    FMF.setAllowReciprocal();
+  return FMF;
+}
+
+static void upgradeDLLImportExportLinkage(GlobalValue *GV, unsigned Val) {
+  switch (Val) {
+  case 5: GV->setDLLStorageClass(GlobalValue::DLLImportStorageClass); break;
+  case 6: GV->setDLLStorageClass(GlobalValue::DLLExportStorageClass); break;
+  }
+}
+
+
+Type *BitcodeReader::getTypeByID(unsigned ID) {
+  // The type table size is always specified correctly.
+  if (ID >= TypeList.size())
+    return nullptr;
+
+  if (Type *Ty = TypeList[ID])
+    return Ty;
+
+  // If we have a forward reference, the only possible case is when it is to a
+  // named struct.  Just create a placeholder for now.
+  return TypeList[ID] = createIdentifiedStructType(Context);
+}
+
+StructType *BitcodeReader::createIdentifiedStructType(LLVMContext &Context,
+                                                      StringRef Name) {
+  auto *Ret = StructType::create(Context, Name);
+  IdentifiedStructTypes.push_back(Ret);
+  return Ret;
+}
+
+StructType *BitcodeReader::createIdentifiedStructType(LLVMContext &Context) {
+  auto *Ret = StructType::create(Context);
+  IdentifiedStructTypes.push_back(Ret);
+  return Ret;
+}
+
+//===----------------------------------------------------------------------===//
+//  Functions for parsing blocks from the bitcode file
+//===----------------------------------------------------------------------===//
+
+static uint64_t getRawAttributeMask(Attribute::AttrKind Val) {
+  switch (Val) {
+  case Attribute::EndAttrKinds:
+    llvm_unreachable("Synthetic enumerators which should never get here");
+
+  case Attribute::None:            return 0;
+  case Attribute::ZExt:            return 1 << 0;
+  case Attribute::SExt:            return 1 << 1;
+  case Attribute::NoReturn:        return 1 << 2;
+  case Attribute::InReg:           return 1 << 3;
+  case Attribute::StructRet:       return 1 << 4;
+  case Attribute::NoUnwind:        return 1 << 5;
+  case Attribute::NoAlias:         return 1 << 6;
+  case Attribute::ByVal:           return 1 << 7;
+  case Attribute::Nest:            return 1 << 8;
+  case Attribute::ReadNone:        return 1 << 9;
+  case Attribute::ReadOnly:        return 1 << 10;
+  case Attribute::NoInline:        return 1 << 11;
+  case Attribute::AlwaysInline:    return 1 << 12;
+  case Attribute::OptimizeForSize: return 1 << 13;
+  case Attribute::StackProtect:    return 1 << 14;
+  case Attribute::StackProtectReq: return 1 << 15;
+  case Attribute::Alignment:       return 31 << 16;
+  case Attribute::NoCapture:       return 1 << 21;
+  case Attribute::NoRedZone:       return 1 << 22;
+  case Attribute::NoImplicitFloat: return 1 << 23;
+  case Attribute::Naked:           return 1 << 24;
+  case Attribute::InlineHint:      return 1 << 25;
+  case Attribute::StackAlignment:  return 7 << 26;
+  case Attribute::ReturnsTwice:    return 1 << 29;
+  case Attribute::UWTable:         return 1 << 30;
+  case Attribute::NonLazyBind:     return 1U << 31;
+  case Attribute::SanitizeAddress: return 1ULL << 32;
+  case Attribute::MinSize:         return 1ULL << 33;
+  case Attribute::NoDuplicate:     return 1ULL << 34;
+  case Attribute::StackProtectStrong: return 1ULL << 35;
+  case Attribute::SanitizeThread:  return 1ULL << 36;
+  case Attribute::SanitizeMemory:  return 1ULL << 37;
+  case Attribute::NoBuiltin:       return 1ULL << 38;
+  case Attribute::Returned:        return 1ULL << 39;
+  case Attribute::Cold:            return 1ULL << 40;
+  case Attribute::Builtin:         return 1ULL << 41;
+  case Attribute::OptimizeNone:    return 1ULL << 42;
+  case Attribute::InAlloca:        return 1ULL << 43;
+  case Attribute::NonNull:         return 1ULL << 44;
+  case Attribute::JumpTable:       return 1ULL << 45;
+  case Attribute::Convergent:      return 1ULL << 46;
+  case Attribute::SafeStack:       return 1ULL << 47;
+  case Attribute::NoRecurse:       return 1ULL << 48;
+  case Attribute::InaccessibleMemOnly:         return 1ULL << 49;
+  case Attribute::InaccessibleMemOrArgMemOnly: return 1ULL << 50;
+  case Attribute::SwiftSelf:       return 1ULL << 51;
+  case Attribute::SwiftError:      return 1ULL << 52;
+  case Attribute::WriteOnly:       return 1ULL << 53;
+
+  // VISC Attributes
+  case Attribute::In:                return 1ULL << 54;
+  case Attribute::Out:               return 1ULL << 55;
+  case Attribute::InOut:             return 1ULL << 56;
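+  // Bits 54-56 are claimed by this patch so that the VISC parameter attributes
+  // also have slots in the legacy raw attribute mask.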
+
+  case Attribute::Dereferenceable:
+    llvm_unreachable("dereferenceable attribute not supported in raw format");
+    break;
+  case Attribute::DereferenceableOrNull:
+    llvm_unreachable("dereferenceable_or_null attribute not supported in raw "
+                     "format");
+    break;
+  case Attribute::ArgMemOnly:
+    llvm_unreachable("argmemonly attribute not supported in raw format");
+    break;
+  case Attribute::AllocSize:
+    llvm_unreachable("allocsize not supported in raw format");
+    break;
+  }
+  llvm_unreachable("Unsupported attribute type");
+}
+
+static void addRawAttributeValue(AttrBuilder &B, uint64_t Val) {
+  if (!Val) return;
+
+  for (Attribute::AttrKind I = Attribute::None; I != Attribute::EndAttrKinds;
+       I = Attribute::AttrKind(I + 1)) {
+    if (I == Attribute::Dereferenceable ||
+        I == Attribute::DereferenceableOrNull ||
+        I == Attribute::ArgMemOnly ||
+        I == Attribute::AllocSize)
+      continue;
+    if (uint64_t A = (Val & getRawAttributeMask(I))) {
+      if (I == Attribute::Alignment)
+        B.addAlignmentAttr(1ULL << ((A >> 16) - 1));
+      else if (I == Attribute::StackAlignment)
+        B.addStackAlignmentAttr(1ULL << ((A >> 26)-1));
+      else
+        B.addAttribute(I);
+    }
+  }
+}
+
+/// \brief This fills an AttrBuilder object with the LLVM attributes that have
+/// been decoded from the given integer. This function must stay in sync with
+/// 'encodeLLVMAttributesForBitcode'.
+static void decodeLLVMAttributesForBitcode(AttrBuilder &B,
+                                           uint64_t EncodedAttrs) {
+  // FIXME: Remove in 4.0.
+
+  // The alignment is stored as a 16-bit raw value from bits 31--16.  We shift
+  // the bits above 31 down by 11 bits.
+  unsigned Alignment = (EncodedAttrs & (0xffffULL << 16)) >> 16;
+  assert((!Alignment || isPowerOf2_32(Alignment)) &&
+         "Alignment must be a power of two.");
+
+  if (Alignment)
+    B.addAlignmentAttr(Alignment);
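+  // Attribute bits at raw position 21 and above were encoded shifted up into
+  // bits 32+, so shifting them down by 11 here restores the positions that
+  // getRawAttributeMask() expects.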
+  addRawAttributeValue(B, ((EncodedAttrs & (0xfffffULL << 32)) >> 11) |
+                          (EncodedAttrs & 0xffff));
+}
+
+Error BitcodeReader::parseAttributeBlock() {
+  if (Stream.EnterSubBlock(bitc::PARAMATTR_BLOCK_ID))
+    return error("Invalid record");
+
+  if (!MAttributes.empty())
+    return error("Invalid multiple blocks");
+
+  SmallVector<uint64_t, 64> Record;
+
+  SmallVector<AttributeSet, 8> Attrs;
+
+  // Read all the records.
+  while (true) {
+    BitstreamEntry Entry = Stream.advanceSkippingSubblocks();
+
+    switch (Entry.Kind) {
+    case BitstreamEntry::SubBlock: // Handled for us already.
+    case BitstreamEntry::Error:
+      return error("Malformed block");
+    case BitstreamEntry::EndBlock:
+      return Error::success();
+    case BitstreamEntry::Record:
+      // The interesting case.
+      break;
+    }
+
+    // Read a record.
+    Record.clear();
+    switch (Stream.readRecord(Entry.ID, Record)) {
+    default:  // Default behavior: ignore.
+      break;
+    case bitc::PARAMATTR_CODE_ENTRY_OLD: { // ENTRY: [paramidx0, attr0, ...]
+      // FIXME: Remove in 4.0.
+      if (Record.size() & 1)
+        return error("Invalid record");
+
+      for (unsigned i = 0, e = Record.size(); i != e; i += 2) {
+        AttrBuilder B;
+        decodeLLVMAttributesForBitcode(B, Record[i+1]);
+        Attrs.push_back(AttributeSet::get(Context, Record[i], B));
+      }
+
+      MAttributes.push_back(AttributeSet::get(Context, Attrs));
+      Attrs.clear();
+      break;
+    }
+    case bitc::PARAMATTR_CODE_ENTRY: { // ENTRY: [attrgrp0, attrgrp1, ...]
+      for (unsigned i = 0, e = Record.size(); i != e; ++i)
+        Attrs.push_back(MAttributeGroups[Record[i]]);
+
+      MAttributes.push_back(AttributeSet::get(Context, Attrs));
+      Attrs.clear();
+      break;
+    }
+    }
+  }
+}
+
+// Returns Attribute::None on unrecognized codes.
+static Attribute::AttrKind getAttrFromCode(uint64_t Code) {
+  switch (Code) {
+  default:
+    return Attribute::None;
+  case bitc::ATTR_KIND_ALIGNMENT:
+    return Attribute::Alignment;
+  case bitc::ATTR_KIND_ALWAYS_INLINE:
+    return Attribute::AlwaysInline;
+  case bitc::ATTR_KIND_ARGMEMONLY:
+    return Attribute::ArgMemOnly;
+  case bitc::ATTR_KIND_BUILTIN:
+    return Attribute::Builtin;
+  case bitc::ATTR_KIND_BY_VAL:
+    return Attribute::ByVal;
+  case bitc::ATTR_KIND_IN_ALLOCA:
+    return Attribute::InAlloca;
+  case bitc::ATTR_KIND_COLD:
+    return Attribute::Cold;
+  case bitc::ATTR_KIND_CONVERGENT:
+    return Attribute::Convergent;
+  case bitc::ATTR_KIND_INACCESSIBLEMEM_ONLY:
+    return Attribute::InaccessibleMemOnly;
+  case bitc::ATTR_KIND_INACCESSIBLEMEM_OR_ARGMEMONLY:
+    return Attribute::InaccessibleMemOrArgMemOnly;
+  case bitc::ATTR_KIND_INLINE_HINT:
+    return Attribute::InlineHint;
+  case bitc::ATTR_KIND_IN_REG:
+    return Attribute::InReg;
+  case bitc::ATTR_KIND_JUMP_TABLE:
+    return Attribute::JumpTable;
+  case bitc::ATTR_KIND_MIN_SIZE:
+    return Attribute::MinSize;
+  case bitc::ATTR_KIND_NAKED:
+    return Attribute::Naked;
+  case bitc::ATTR_KIND_NEST:
+    return Attribute::Nest;
+  case bitc::ATTR_KIND_NO_ALIAS:
+    return Attribute::NoAlias;
+  case bitc::ATTR_KIND_NO_BUILTIN:
+    return Attribute::NoBuiltin;
+  case bitc::ATTR_KIND_NO_CAPTURE:
+    return Attribute::NoCapture;
+  case bitc::ATTR_KIND_NO_DUPLICATE:
+    return Attribute::NoDuplicate;
+  case bitc::ATTR_KIND_NO_IMPLICIT_FLOAT:
+    return Attribute::NoImplicitFloat;
+  case bitc::ATTR_KIND_NO_INLINE:
+    return Attribute::NoInline;
+  case bitc::ATTR_KIND_NO_RECURSE:
+    return Attribute::NoRecurse;
+  case bitc::ATTR_KIND_NON_LAZY_BIND:
+    return Attribute::NonLazyBind;
+  case bitc::ATTR_KIND_NON_NULL:
+    return Attribute::NonNull;
+  case bitc::ATTR_KIND_DEREFERENCEABLE:
+    return Attribute::Dereferenceable;
+  case bitc::ATTR_KIND_DEREFERENCEABLE_OR_NULL:
+    return Attribute::DereferenceableOrNull;
+  case bitc::ATTR_KIND_ALLOC_SIZE:
+    return Attribute::AllocSize;
+  case bitc::ATTR_KIND_NO_RED_ZONE:
+    return Attribute::NoRedZone;
+  case bitc::ATTR_KIND_NO_RETURN:
+    return Attribute::NoReturn;
+  case bitc::ATTR_KIND_NO_UNWIND:
+    return Attribute::NoUnwind;
+  case bitc::ATTR_KIND_OPTIMIZE_FOR_SIZE:
+    return Attribute::OptimizeForSize;
+  case bitc::ATTR_KIND_OPTIMIZE_NONE:
+    return Attribute::OptimizeNone;
+  case bitc::ATTR_KIND_READ_NONE:
+    return Attribute::ReadNone;
+  case bitc::ATTR_KIND_READ_ONLY:
+    return Attribute::ReadOnly;
+  case bitc::ATTR_KIND_RETURNED:
+    return Attribute::Returned;
+  case bitc::ATTR_KIND_RETURNS_TWICE:
+    return Attribute::ReturnsTwice;
+  case bitc::ATTR_KIND_S_EXT:
+    return Attribute::SExt;
+  case bitc::ATTR_KIND_STACK_ALIGNMENT:
+    return Attribute::StackAlignment;
+  case bitc::ATTR_KIND_STACK_PROTECT:
+    return Attribute::StackProtect;
+  case bitc::ATTR_KIND_STACK_PROTECT_REQ:
+    return Attribute::StackProtectReq;
+  case bitc::ATTR_KIND_STACK_PROTECT_STRONG:
+    return Attribute::StackProtectStrong;
+  case bitc::ATTR_KIND_SAFESTACK:
+    return Attribute::SafeStack;
+  case bitc::ATTR_KIND_STRUCT_RET:
+    return Attribute::StructRet;
+  case bitc::ATTR_KIND_SANITIZE_ADDRESS:
+    return Attribute::SanitizeAddress;
+  case bitc::ATTR_KIND_SANITIZE_THREAD:
+    return Attribute::SanitizeThread;
+  case bitc::ATTR_KIND_SANITIZE_MEMORY:
+    return Attribute::SanitizeMemory;
+  case bitc::ATTR_KIND_SWIFT_ERROR:
+    return Attribute::SwiftError;
+  case bitc::ATTR_KIND_SWIFT_SELF:
+    return Attribute::SwiftSelf;
+  case bitc::ATTR_KIND_UW_TABLE:
+    return Attribute::UWTable;
+  case bitc::ATTR_KIND_WRITEONLY:
+    return Attribute::WriteOnly;
+  case bitc::ATTR_KIND_Z_EXT:
+    return Attribute::ZExt;
+  }
+}
+
+Error BitcodeReader::parseAlignmentValue(uint64_t Exponent,
+                                         unsigned &Alignment) {
+  // Note: Alignment in bitcode files is incremented by 1, so that zero
+  // can be used for default alignment.
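+  // For example, an exponent of 4 decodes to an 8-byte alignment, while 0
+  // means no explicit alignment.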
+  if (Exponent > Value::MaxAlignmentExponent + 1)
+    return error("Invalid alignment value");
+  Alignment = (1 << static_cast<unsigned>(Exponent)) >> 1;
+  return Error::success();
+}
+
+Error BitcodeReader::parseAttrKind(uint64_t Code, Attribute::AttrKind *Kind) {
+  *Kind = getAttrFromCode(Code);
+  if (*Kind == Attribute::None)
+    return error("Unknown attribute kind (" + Twine(Code) + ")");
+  return Error::success();
+}
+
+Error BitcodeReader::parseAttributeGroupBlock() {
+  if (Stream.EnterSubBlock(bitc::PARAMATTR_GROUP_BLOCK_ID))
+    return error("Invalid record");
+
+  if (!MAttributeGroups.empty())
+    return error("Invalid multiple blocks");
+
+  SmallVector<uint64_t, 64> Record;
+
+  // Read all the records.
+  while (true) {
+    BitstreamEntry Entry = Stream.advanceSkippingSubblocks();
+
+    switch (Entry.Kind) {
+    case BitstreamEntry::SubBlock: // Handled for us already.
+    case BitstreamEntry::Error:
+      return error("Malformed block");
+    case BitstreamEntry::EndBlock:
+      return Error::success();
+    case BitstreamEntry::Record:
+      // The interesting case.
+      break;
+    }
+
+    // Read a record.
+    Record.clear();
+    switch (Stream.readRecord(Entry.ID, Record)) {
+    default:  // Default behavior: ignore.
+      break;
+    case bitc::PARAMATTR_GRP_CODE_ENTRY: { // ENTRY: [grpid, idx, a0, a1, ...]
+      if (Record.size() < 3)
+        return error("Invalid record");
+
+      uint64_t GrpID = Record[0];
+      uint64_t Idx = Record[1]; // Index of the object this attribute refers to.
+
+      AttrBuilder B;
+      for (unsigned i = 2, e = Record.size(); i != e; ++i) {
+        if (Record[i] == 0) {        // Enum attribute
+          Attribute::AttrKind Kind;
+          if (Error Err = parseAttrKind(Record[++i], &Kind))
+            return Err;
+
+          B.addAttribute(Kind);
+        } else if (Record[i] == 1) { // Integer attribute
+          Attribute::AttrKind Kind;
+          if (Error Err = parseAttrKind(Record[++i], &Kind))
+            return Err;
+          if (Kind == Attribute::Alignment)
+            B.addAlignmentAttr(Record[++i]);
+          else if (Kind == Attribute::StackAlignment)
+            B.addStackAlignmentAttr(Record[++i]);
+          else if (Kind == Attribute::Dereferenceable)
+            B.addDereferenceableAttr(Record[++i]);
+          else if (Kind == Attribute::DereferenceableOrNull)
+            B.addDereferenceableOrNullAttr(Record[++i]);
+          else if (Kind == Attribute::AllocSize)
+            B.addAllocSizeAttrFromRawRepr(Record[++i]);
+        } else {                     // String attribute
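+          // A code of 3 marks a key-only string attribute; 4 marks a key/value pair.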
+          assert((Record[i] == 3 || Record[i] == 4) &&
+                 "Invalid attribute group entry");
+          bool HasValue = (Record[i++] == 4);
+          SmallString<64> KindStr;
+          SmallString<64> ValStr;
+
+          while (Record[i] != 0 && i != e)
+            KindStr += Record[i++];
+          assert(Record[i] == 0 && "Kind string not null terminated");
+
+          if (HasValue) {
+            // Has a value associated with it.
+            ++i; // Skip the '0' that terminates the "kind" string.
+            while (Record[i] != 0 && i != e)
+              ValStr += Record[i++];
+            assert(Record[i] == 0 && "Value string not null terminated");
+          }
+
+          B.addAttribute(KindStr.str(), ValStr.str());
+        }
+      }
+
+      MAttributeGroups[GrpID] = AttributeSet::get(Context, Idx, B);
+      break;
+    }
+    }
+  }
+}
+
+Error BitcodeReader::parseTypeTable() {
+  if (Stream.EnterSubBlock(bitc::TYPE_BLOCK_ID_NEW))
+    return error("Invalid record");
+
+  return parseTypeTableBody();
+}
+
+Error BitcodeReader::parseTypeTableBody() {
+  if (!TypeList.empty())
+    return error("Invalid multiple blocks");
+
+  SmallVector<uint64_t, 64> Record;
+  unsigned NumRecords = 0;
+
+  SmallString<64> TypeName;
+
+  // Read all the records for this type table.
+  while (true) {
+    BitstreamEntry Entry = Stream.advanceSkippingSubblocks();
+
+    switch (Entry.Kind) {
+    case BitstreamEntry::SubBlock: // Handled for us already.
+    case BitstreamEntry::Error:
+      return error("Malformed block");
+    case BitstreamEntry::EndBlock:
+      if (NumRecords != TypeList.size())
+        return error("Malformed block");
+      return Error::success();
+    case BitstreamEntry::Record:
+      // The interesting case.
+      break;
+    }
+
+    // Read a record.
+    Record.clear();
+    Type *ResultTy = nullptr;
+    switch (Stream.readRecord(Entry.ID, Record)) {
+    default:
+      return error("Invalid value");
+    case bitc::TYPE_CODE_NUMENTRY: // TYPE_CODE_NUMENTRY: [numentries]
+      // TYPE_CODE_NUMENTRY contains a count of the number of types in the
+      // type list.  This allows us to reserve space.
+      if (Record.size() < 1)
+        return error("Invalid record");
+      TypeList.resize(Record[0]);
+      continue;
+    case bitc::TYPE_CODE_VOID:      // VOID
+      ResultTy = Type::getVoidTy(Context);
+      break;
+    case bitc::TYPE_CODE_HALF:     // HALF
+      ResultTy = Type::getHalfTy(Context);
+      break;
+    case bitc::TYPE_CODE_FLOAT:     // FLOAT
+      ResultTy = Type::getFloatTy(Context);
+      break;
+    case bitc::TYPE_CODE_DOUBLE:    // DOUBLE
+      ResultTy = Type::getDoubleTy(Context);
+      break;
+    case bitc::TYPE_CODE_X86_FP80:  // X86_FP80
+      ResultTy = Type::getX86_FP80Ty(Context);
+      break;
+    case bitc::TYPE_CODE_FP128:     // FP128
+      ResultTy = Type::getFP128Ty(Context);
+      break;
+    case bitc::TYPE_CODE_PPC_FP128: // PPC_FP128
+      ResultTy = Type::getPPC_FP128Ty(Context);
+      break;
+    case bitc::TYPE_CODE_LABEL:     // LABEL
+      ResultTy = Type::getLabelTy(Context);
+      break;
+    case bitc::TYPE_CODE_METADATA:  // METADATA
+      ResultTy = Type::getMetadataTy(Context);
+      break;
+    case bitc::TYPE_CODE_X86_MMX:   // X86_MMX
+      ResultTy = Type::getX86_MMXTy(Context);
+      break;
+    case bitc::TYPE_CODE_TOKEN:     // TOKEN
+      ResultTy = Type::getTokenTy(Context);
+      break;
+    case bitc::TYPE_CODE_INTEGER: { // INTEGER: [width]
+      if (Record.size() < 1)
+        return error("Invalid record");
+
+      uint64_t NumBits = Record[0];
+      if (NumBits < IntegerType::MIN_INT_BITS ||
+          NumBits > IntegerType::MAX_INT_BITS)
+        return error("Bitwidth for integer type out of range");
+      ResultTy = IntegerType::get(Context, NumBits);
+      break;
+    }
+    case bitc::TYPE_CODE_POINTER: { // POINTER: [pointee type] or
+                                    //          [pointee type, address space]
+      if (Record.size() < 1)
+        return error("Invalid record");
+      unsigned AddressSpace = 0;
+      if (Record.size() == 2)
+        AddressSpace = Record[1];
+      ResultTy = getTypeByID(Record[0]);
+      if (!ResultTy ||
+          !PointerType::isValidElementType(ResultTy))
+        return error("Invalid type");
+      ResultTy = PointerType::get(ResultTy, AddressSpace);
+      break;
+    }
+    case bitc::TYPE_CODE_FUNCTION_OLD: {
+      // FIXME: attrid is dead, remove it in LLVM 4.0
+      // FUNCTION: [vararg, attrid, retty, paramty x N]
+      if (Record.size() < 3)
+        return error("Invalid record");
+      SmallVector<Type*, 8> ArgTys;
+      for (unsigned i = 3, e = Record.size(); i != e; ++i) {
+        if (Type *T = getTypeByID(Record[i]))
+          ArgTys.push_back(T);
+        else
+          break;
+      }
+
+      ResultTy = getTypeByID(Record[2]);
+      if (!ResultTy || ArgTys.size() < Record.size()-3)
+        return error("Invalid type");
+
+      ResultTy = FunctionType::get(ResultTy, ArgTys, Record[0]);
+      break;
+    }
+    case bitc::TYPE_CODE_FUNCTION: {
+      // FUNCTION: [vararg, retty, paramty x N]
+      if (Record.size() < 2)
+        return error("Invalid record");
+      SmallVector<Type*, 8> ArgTys;
+      for (unsigned i = 2, e = Record.size(); i != e; ++i) {
+        if (Type *T = getTypeByID(Record[i])) {
+          if (!FunctionType::isValidArgumentType(T))
+            return error("Invalid function argument type");
+          ArgTys.push_back(T);
+        }
+        else
+          break;
+      }
+
+      ResultTy = getTypeByID(Record[1]);
+      if (!ResultTy || ArgTys.size() < Record.size()-2)
+        return error("Invalid type");
+
+      ResultTy = FunctionType::get(ResultTy, ArgTys, Record[0]);
+      break;
+    }
+    case bitc::TYPE_CODE_STRUCT_ANON: {  // STRUCT: [ispacked, eltty x N]
+      if (Record.size() < 1)
+        return error("Invalid record");
+      SmallVector<Type*, 8> EltTys;
+      for (unsigned i = 1, e = Record.size(); i != e; ++i) {
+        if (Type *T = getTypeByID(Record[i]))
+          EltTys.push_back(T);
+        else
+          break;
+      }
+      if (EltTys.size() != Record.size()-1)
+        return error("Invalid type");
+      ResultTy = StructType::get(Context, EltTys, Record[0]);
+      break;
+    }
+    case bitc::TYPE_CODE_STRUCT_NAME:   // STRUCT_NAME: [strchr x N]
+      if (convertToString(Record, 0, TypeName))
+        return error("Invalid record");
+      continue;
+
+    case bitc::TYPE_CODE_STRUCT_NAMED: { // STRUCT: [ispacked, eltty x N]
+      if (Record.size() < 1)
+        return error("Invalid record");
+
+      if (NumRecords >= TypeList.size())
+        return error("Invalid TYPE table");
+
+      // Check to see if this was forward referenced, if so fill in the temp.
+      StructType *Res = cast_or_null<StructType>(TypeList[NumRecords]);
+      if (Res) {
+        Res->setName(TypeName);
+        TypeList[NumRecords] = nullptr;
+      } else  // Otherwise, create a new struct.
+        Res = createIdentifiedStructType(Context, TypeName);
+      TypeName.clear();
+
+      SmallVector<Type*, 8> EltTys;
+      for (unsigned i = 1, e = Record.size(); i != e; ++i) {
+        if (Type *T = getTypeByID(Record[i]))
+          EltTys.push_back(T);
+        else
+          break;
+      }
+      if (EltTys.size() != Record.size()-1)
+        return error("Invalid record");
+      Res->setBody(EltTys, Record[0]);
+      ResultTy = Res;
+      break;
+    }
+    case bitc::TYPE_CODE_OPAQUE: {       // OPAQUE: []
+      if (Record.size() != 1)
+        return error("Invalid record");
+
+      if (NumRecords >= TypeList.size())
+        return error("Invalid TYPE table");
+
+      // Check to see if this was forward referenced, if so fill in the temp.
+      StructType *Res = cast_or_null<StructType>(TypeList[NumRecords]);
+      if (Res) {
+        Res->setName(TypeName);
+        TypeList[NumRecords] = nullptr;
+      } else  // Otherwise, create a new struct with no body.
+        Res = createIdentifiedStructType(Context, TypeName);
+      TypeName.clear();
+      ResultTy = Res;
+      break;
+    }
+    case bitc::TYPE_CODE_ARRAY:     // ARRAY: [numelts, eltty]
+      if (Record.size() < 2)
+        return error("Invalid record");
+      ResultTy = getTypeByID(Record[1]);
+      if (!ResultTy || !ArrayType::isValidElementType(ResultTy))
+        return error("Invalid type");
+      ResultTy = ArrayType::get(ResultTy, Record[0]);
+      break;
+    case bitc::TYPE_CODE_VECTOR:    // VECTOR: [numelts, eltty]
+      if (Record.size() < 2)
+        return error("Invalid record");
+      if (Record[0] == 0)
+        return error("Invalid vector length");
+      ResultTy = getTypeByID(Record[1]);
+      if (!ResultTy || !StructType::isValidElementType(ResultTy))
+        return error("Invalid type");
+      ResultTy = VectorType::get(ResultTy, Record[0]);
+      break;
+    }
+
+    if (NumRecords >= TypeList.size())
+      return error("Invalid TYPE table");
+    if (TypeList[NumRecords])
+      return error(
+          "Invalid TYPE table: Only named structs can be forward referenced");
+    assert(ResultTy && "Didn't read a type?");
+    TypeList[NumRecords++] = ResultTy;
+  }
+}
+
+Error BitcodeReader::parseOperandBundleTags() {
+  if (Stream.EnterSubBlock(bitc::OPERAND_BUNDLE_TAGS_BLOCK_ID))
+    return error("Invalid record");
+
+  if (!BundleTags.empty())
+    return error("Invalid multiple blocks");
+
+  SmallVector<uint64_t, 64> Record;
+
+  while (true) {
+    BitstreamEntry Entry = Stream.advanceSkippingSubblocks();
+
+    switch (Entry.Kind) {
+    case BitstreamEntry::SubBlock: // Handled for us already.
+    case BitstreamEntry::Error:
+      return error("Malformed block");
+    case BitstreamEntry::EndBlock:
+      return Error::success();
+    case BitstreamEntry::Record:
+      // The interesting case.
+      break;
+    }
+
+    // Tags are implicitly mapped to integers by their order.
+
+    if (Stream.readRecord(Entry.ID, Record) != bitc::OPERAND_BUNDLE_TAG)
+      return error("Invalid record");
+
+    // OPERAND_BUNDLE_TAG: [strchr x N]
+    BundleTags.emplace_back();
+    if (convertToString(Record, 0, BundleTags.back()))
+      return error("Invalid record");
+    Record.clear();
+  }
+}
+
+/// Associate a value with its name from the given index in the provided record.
+Expected<Value *> BitcodeReader::recordValue(SmallVectorImpl<uint64_t> &Record,
+                                             unsigned NameIndex, Triple &TT) {
+  SmallString<128> ValueName;
+  if (convertToString(Record, NameIndex, ValueName))
+    return error("Invalid record");
+  unsigned ValueID = Record[0];
+  if (ValueID >= ValueList.size() || !ValueList[ValueID])
+    return error("Invalid record");
+  Value *V = ValueList[ValueID];
+
+  StringRef NameStr(ValueName.data(), ValueName.size());
+  if (NameStr.find_first_of(0) != StringRef::npos)
+    return error("Invalid value name");
+  V->setName(NameStr);
+  auto *GO = dyn_cast<GlobalObject>(V);
+  if (GO) {
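+    // A comdat of 1 is a placeholder set when parsing the MODULE_CODE_*
+    // record for a global with an implicit comdat; now that the value's name
+    // is known, resolve it to a real comdat, or drop it on Mach-O, which has
+    // no comdat support.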
+    if (GO->getComdat() == reinterpret_cast<Comdat *>(1)) {
+      if (TT.isOSBinFormatMachO())
+        GO->setComdat(nullptr);
+      else
+        GO->setComdat(TheModule->getOrInsertComdat(V->getName()));
+    }
+  }
+  return V;
+}
+
+/// Helper to note and return the current location, and jump to the given
+/// offset.
+static uint64_t jumpToValueSymbolTable(uint64_t Offset,
+                                       BitstreamCursor &Stream) {
+  // Save the current parsing location so we can jump back at the end
+  // of the VST read.
+  uint64_t CurrentBit = Stream.GetCurrentBitNo();
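+  // VST forward-reference offsets are stored in 32-bit words; scale to a bit
+  // offset before jumping.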
+  Stream.JumpToBit(Offset * 32);
+#ifndef NDEBUG
+  // Do some checking if we are in debug mode.
+  BitstreamEntry Entry = Stream.advance();
+  assert(Entry.Kind == BitstreamEntry::SubBlock);
+  assert(Entry.ID == bitc::VALUE_SYMTAB_BLOCK_ID);
+#else
+  // In NDEBUG mode ignore the output so we don't get an unused variable
+  // warning.
+  Stream.advance();
+#endif
+  return CurrentBit;
+}
+
+/// Parse the value symbol table at either the current parsing location or
+/// at the given bit offset if provided.
+Error BitcodeReader::parseValueSymbolTable(uint64_t Offset) {
+  uint64_t CurrentBit;
+  // Pass in the Offset to distinguish between calling for the module-level
+  // VST (where we want to jump to the VST offset) and the function-level
+  // VST (where we don't).
+  if (Offset > 0)
+    CurrentBit = jumpToValueSymbolTable(Offset, Stream);
+
+  // Compute the delta between the bitcode indices in the VST (the word offset
+  // to the word-aligned ENTER_SUBBLOCK for the function block) and that
+  // expected by the lazy reader. The reader's EnterSubBlock expects to have
+  // already read the ENTER_SUBBLOCK code (size getAbbrevIDWidth) and BlockID
+  // (size BlockIDWidth). Note that we access the stream's AbbrevID width here
+  // just before entering the VST subblock because: 1) the EnterSubBlock
+  // changes the AbbrevID width; 2) the VST block is nested within the same
+  // outer MODULE_BLOCK as the FUNCTION_BLOCKs and therefore has the same
+  // AbbrevID width before calling EnterSubBlock; and 3) when we want to
+  // jump to the FUNCTION_BLOCK using this offset later, we don't want
+  // to rely on the stream's AbbrevID width being that of the MODULE_BLOCK.
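+  // (bitc::BlockIDWidth is a fixed 8 bits; the abbrev ID width depends on the
+  // enclosing block.)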
+  unsigned FuncBitcodeOffsetDelta =
+      Stream.getAbbrevIDWidth() + bitc::BlockIDWidth;
+
+  if (Stream.EnterSubBlock(bitc::VALUE_SYMTAB_BLOCK_ID))
+    return error("Invalid record");
+
+  SmallVector<uint64_t, 64> Record;
+
+  Triple TT(TheModule->getTargetTriple());
+
+  // Read all the records for this value table.
+  SmallString<128> ValueName;
+
+  while (true) {
+    BitstreamEntry Entry = Stream.advanceSkippingSubblocks();
+
+    switch (Entry.Kind) {
+    case BitstreamEntry::SubBlock: // Handled for us already.
+    case BitstreamEntry::Error:
+      return error("Malformed block");
+    case BitstreamEntry::EndBlock:
+      if (Offset > 0)
+        Stream.JumpToBit(CurrentBit);
+      return Error::success();
+    case BitstreamEntry::Record:
+      // The interesting case.
+      break;
+    }
+
+    // Read a record.
+    Record.clear();
+    switch (Stream.readRecord(Entry.ID, Record)) {
+    default:  // Default behavior: unknown type.
+      break;
+    case bitc::VST_CODE_ENTRY: {  // VST_CODE_ENTRY: [valueid, namechar x N]
+      Expected<Value *> ValOrErr = recordValue(Record, 1, TT);
+      if (Error Err = ValOrErr.takeError())
+        return Err;
+      ValOrErr.get();
+      break;
+    }
+    case bitc::VST_CODE_FNENTRY: {
+      // VST_CODE_FNENTRY: [valueid, offset, namechar x N]
+      Expected<Value *> ValOrErr = recordValue(Record, 2, TT);
+      if (Error Err = ValOrErr.takeError())
+        return Err;
+      Value *V = ValOrErr.get();
+
+      auto *GO = dyn_cast<GlobalObject>(V);
+      if (!GO) {
+        // If this is an alias, need to get the actual Function object
+        // it aliases, in order to set up the DeferredFunctionInfo entry below.
+        auto *GA = dyn_cast<GlobalAlias>(V);
+        if (GA)
+          GO = GA->getBaseObject();
+        assert(GO);
+      }
+
+      // Note that we subtract 1 here because the offset is relative to one word
+      // before the start of the identification or module block, which was
+      // historically always the start of the regular bitcode header.
+      uint64_t FuncWordOffset = Record[1] - 1;
+      Function *F = dyn_cast<Function>(GO);
+      assert(F);
+      uint64_t FuncBitOffset = FuncWordOffset * 32;
+      DeferredFunctionInfo[F] = FuncBitOffset + FuncBitcodeOffsetDelta;
+      // Set the LastFunctionBlockBit to point to the last function block.
+      // Later when parsing is resumed after function materialization,
+      // we can simply skip that last function block.
+      if (FuncBitOffset > LastFunctionBlockBit)
+        LastFunctionBlockBit = FuncBitOffset;
+      break;
+    }
+    case bitc::VST_CODE_BBENTRY: {
+      if (convertToString(Record, 1, ValueName))
+        return error("Invalid record");
+      BasicBlock *BB = getBasicBlock(Record[0]);
+      if (!BB)
+        return error("Invalid record");
+
+      BB->setName(StringRef(ValueName.data(), ValueName.size()));
+      ValueName.clear();
+      break;
+    }
+    }
+  }
+}
+
+/// Decode a signed value stored with the sign bit in the LSB for dense VBR
+/// encoding.
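+/// For example, 6 decodes to 3, 7 decodes to -3, and the special encoding 1
+/// decodes to INT64_MIN (0x8000000000000000).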
+uint64_t BitcodeReader::decodeSignRotatedValue(uint64_t V) {
+  if ((V & 1) == 0)
+    return V >> 1;
+  if (V != 1)
+    return -(V >> 1);
+  // There is no such thing as -0 with integers.  "-0" really means MININT.
+  return 1ULL << 63;
+}
+
+/// Resolve all of the initializers for global values and aliases that we can.
+Error BitcodeReader::resolveGlobalAndIndirectSymbolInits() {
+  std::vector<std::pair<GlobalVariable*, unsigned> > GlobalInitWorklist;
+  std::vector<std::pair<GlobalIndirectSymbol*, unsigned> >
+      IndirectSymbolInitWorklist;
+  std::vector<std::pair<Function*, unsigned> > FunctionPrefixWorklist;
+  std::vector<std::pair<Function*, unsigned> > FunctionPrologueWorklist;
+  std::vector<std::pair<Function*, unsigned> > FunctionPersonalityFnWorklist;
+
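+  // Swap the pending lists into local worklists; entries whose value IDs have
+  // not been read yet are pushed back onto the member lists for a later pass.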
+  GlobalInitWorklist.swap(GlobalInits);
+  IndirectSymbolInitWorklist.swap(IndirectSymbolInits);
+  FunctionPrefixWorklist.swap(FunctionPrefixes);
+  FunctionPrologueWorklist.swap(FunctionPrologues);
+  FunctionPersonalityFnWorklist.swap(FunctionPersonalityFns);
+
+  while (!GlobalInitWorklist.empty()) {
+    unsigned ValID = GlobalInitWorklist.back().second;
+    if (ValID >= ValueList.size()) {
+      // Not ready to resolve this yet, it requires something later in the file.
+      GlobalInits.push_back(GlobalInitWorklist.back());
+    } else {
+      if (Constant *C = dyn_cast_or_null<Constant>(ValueList[ValID]))
+        GlobalInitWorklist.back().first->setInitializer(C);
+      else
+        return error("Expected a constant");
+    }
+    GlobalInitWorklist.pop_back();
+  }
+
+  while (!IndirectSymbolInitWorklist.empty()) {
+    unsigned ValID = IndirectSymbolInitWorklist.back().second;
+    if (ValID >= ValueList.size()) {
+      IndirectSymbolInits.push_back(IndirectSymbolInitWorklist.back());
+    } else {
+      Constant *C = dyn_cast_or_null<Constant>(ValueList[ValID]);
+      if (!C)
+        return error("Expected a constant");
+      GlobalIndirectSymbol *GIS = IndirectSymbolInitWorklist.back().first;
+      if (isa<GlobalAlias>(GIS) && C->getType() != GIS->getType())
+        return error("Alias and aliasee types don't match");
+      GIS->setIndirectSymbol(C);
+    }
+    IndirectSymbolInitWorklist.pop_back();
+  }
+
+  while (!FunctionPrefixWorklist.empty()) {
+    unsigned ValID = FunctionPrefixWorklist.back().second;
+    if (ValID >= ValueList.size()) {
+      FunctionPrefixes.push_back(FunctionPrefixWorklist.back());
+    } else {
+      if (Constant *C = dyn_cast_or_null<Constant>(ValueList[ValID]))
+        FunctionPrefixWorklist.back().first->setPrefixData(C);
+      else
+        return error("Expected a constant");
+    }
+    FunctionPrefixWorklist.pop_back();
+  }
+
+  while (!FunctionPrologueWorklist.empty()) {
+    unsigned ValID = FunctionPrologueWorklist.back().second;
+    if (ValID >= ValueList.size()) {
+      FunctionPrologues.push_back(FunctionPrologueWorklist.back());
+    } else {
+      if (Constant *C = dyn_cast_or_null<Constant>(ValueList[ValID]))
+        FunctionPrologueWorklist.back().first->setPrologueData(C);
+      else
+        return error("Expected a constant");
+    }
+    FunctionPrologueWorklist.pop_back();
+  }
+
+  while (!FunctionPersonalityFnWorklist.empty()) {
+    unsigned ValID = FunctionPersonalityFnWorklist.back().second;
+    if (ValID >= ValueList.size()) {
+      FunctionPersonalityFns.push_back(FunctionPersonalityFnWorklist.back());
+    } else {
+      if (Constant *C = dyn_cast_or_null<Constant>(ValueList[ValID]))
+        FunctionPersonalityFnWorklist.back().first->setPersonalityFn(C);
+      else
+        return error("Expected a constant");
+    }
+    FunctionPersonalityFnWorklist.pop_back();
+  }
+
+  return Error::success();
+}
+
+static APInt readWideAPInt(ArrayRef<uint64_t> Vals, unsigned TypeBits) {
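+  // Each 64-bit word of a wide integer is stored sign-rotated in the record,
+  // so decode every word before assembling the APInt.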
+  SmallVector<uint64_t, 8> Words(Vals.size());
+  transform(Vals, Words.begin(),
+            BitcodeReader::decodeSignRotatedValue);
+
+  return APInt(TypeBits, Words);
+}
+
+Error BitcodeReader::parseConstants() {
+  if (Stream.EnterSubBlock(bitc::CONSTANTS_BLOCK_ID))
+    return error("Invalid record");
+
+  SmallVector<uint64_t, 64> Record;
+
+  // Read all the records for this value table.
+  Type *CurTy = Type::getInt32Ty(Context);
+  unsigned NextCstNo = ValueList.size();
+
+  while (true) {
+    BitstreamEntry Entry = Stream.advanceSkippingSubblocks();
+
+    switch (Entry.Kind) {
+    case BitstreamEntry::SubBlock: // Handled for us already.
+    case BitstreamEntry::Error:
+      return error("Malformed block");
+    case BitstreamEntry::EndBlock:
+      if (NextCstNo != ValueList.size())
+        return error("Invalid constant reference");
+
+      // Once all the constants have been read, go through and resolve forward
+      // references.
+      ValueList.resolveConstantForwardRefs();
+      return Error::success();
+    case BitstreamEntry::Record:
+      // The interesting case.
+      break;
+    }
+
+    // Read a record.
+    Record.clear();
+    Type *VoidType = Type::getVoidTy(Context);
+    Value *V = nullptr;
+    unsigned BitCode = Stream.readRecord(Entry.ID, Record);
+    switch (BitCode) {
+    default:  // Default behavior: unknown constant
+    case bitc::CST_CODE_UNDEF:     // UNDEF
+      V = UndefValue::get(CurTy);
+      break;
+    case bitc::CST_CODE_SETTYPE:   // SETTYPE: [typeid]
+      if (Record.empty())
+        return error("Invalid record");
+      if (Record[0] >= TypeList.size() || !TypeList[Record[0]])
+        return error("Invalid record");
+      if (TypeList[Record[0]] == VoidType)
+        return error("Invalid constant type");
+      CurTy = TypeList[Record[0]];
+      continue;  // Skip the ValueList manipulation.
+    case bitc::CST_CODE_NULL:      // NULL
+      V = Constant::getNullValue(CurTy);
+      break;
+    case bitc::CST_CODE_INTEGER:   // INTEGER: [intval]
+      if (!CurTy->isIntegerTy() || Record.empty())
+        return error("Invalid record");
+      V = ConstantInt::get(CurTy, decodeSignRotatedValue(Record[0]));
+      break;
+    case bitc::CST_CODE_WIDE_INTEGER: {// WIDE_INTEGER: [n x intval]
+      if (!CurTy->isIntegerTy() || Record.empty())
+        return error("Invalid record");
+
+      APInt VInt =
+          readWideAPInt(Record, cast<IntegerType>(CurTy)->getBitWidth());
+      V = ConstantInt::get(Context, VInt);
+
+      break;
+    }
+    case bitc::CST_CODE_FLOAT: {    // FLOAT: [fpval]
+      if (Record.empty())
+        return error("Invalid record");
+      if (CurTy->isHalfTy())
+        V = ConstantFP::get(Context, APFloat(APFloat::IEEEhalf(),
+                                             APInt(16, (uint16_t)Record[0])));
+      else if (CurTy->isFloatTy())
+        V = ConstantFP::get(Context, APFloat(APFloat::IEEEsingle(),
+                                             APInt(32, (uint32_t)Record[0])));
+      else if (CurTy->isDoubleTy())
+        V = ConstantFP::get(Context, APFloat(APFloat::IEEEdouble(),
+                                             APInt(64, Record[0])));
+      else if (CurTy->isX86_FP80Ty()) {
+        // Bits are not stored the same way as a normal i80 APInt; compensate.
+        uint64_t Rearrange[2];
+        Rearrange[0] = (Record[1] & 0xffffLL) | (Record[0] << 16);
+        Rearrange[1] = Record[0] >> 48;
+        V = ConstantFP::get(Context, APFloat(APFloat::x87DoubleExtended(),
+                                             APInt(80, Rearrange)));
+      } else if (CurTy->isFP128Ty())
+        V = ConstantFP::get(Context, APFloat(APFloat::IEEEquad(),
+                                             APInt(128, Record)));
+      else if (CurTy->isPPC_FP128Ty())
+        V = ConstantFP::get(Context, APFloat(APFloat::PPCDoubleDouble(),
+                                             APInt(128, Record)));
+      else
+        V = UndefValue::get(CurTy);
+      break;
+    }
+
+    case bitc::CST_CODE_AGGREGATE: {// AGGREGATE: [n x value number]
+      if (Record.empty())
+        return error("Invalid record");
+
+      unsigned Size = Record.size();
+      SmallVector<Constant*, 16> Elts;
+
+      if (StructType *STy = dyn_cast<StructType>(CurTy)) {
+        for (unsigned i = 0; i != Size; ++i)
+          Elts.push_back(ValueList.getConstantFwdRef(Record[i],
+                                                     STy->getElementType(i)));
+        V = ConstantStruct::get(STy, Elts);
+      } else if (ArrayType *ATy = dyn_cast<ArrayType>(CurTy)) {
+        Type *EltTy = ATy->getElementType();
+        for (unsigned i = 0; i != Size; ++i)
+          Elts.push_back(ValueList.getConstantFwdRef(Record[i], EltTy));
+        V = ConstantArray::get(ATy, Elts);
+      } else if (VectorType *VTy = dyn_cast<VectorType>(CurTy)) {
+        Type *EltTy = VTy->getElementType();
+        for (unsigned i = 0; i != Size; ++i)
+          Elts.push_back(ValueList.getConstantFwdRef(Record[i], EltTy));
+        V = ConstantVector::get(Elts);
+      } else {
+        V = UndefValue::get(CurTy);
+      }
+      break;
+    }
+    case bitc::CST_CODE_STRING:    // STRING: [values]
+    case bitc::CST_CODE_CSTRING: { // CSTRING: [values]
+      if (Record.empty())
+        return error("Invalid record");
+
+      SmallString<16> Elts(Record.begin(), Record.end());
+      V = ConstantDataArray::getString(Context, Elts,
+                                       BitCode == bitc::CST_CODE_CSTRING);
+      break;
+    }
+    case bitc::CST_CODE_DATA: {// DATA: [n x value]
+      if (Record.empty())
+        return error("Invalid record");
+
+      Type *EltTy = cast<SequentialType>(CurTy)->getElementType();
+      if (EltTy->isIntegerTy(8)) {
+        SmallVector<uint8_t, 16> Elts(Record.begin(), Record.end());
+        if (isa<VectorType>(CurTy))
+          V = ConstantDataVector::get(Context, Elts);
+        else
+          V = ConstantDataArray::get(Context, Elts);
+      } else if (EltTy->isIntegerTy(16)) {
+        SmallVector<uint16_t, 16> Elts(Record.begin(), Record.end());
+        if (isa<VectorType>(CurTy))
+          V = ConstantDataVector::get(Context, Elts);
+        else
+          V = ConstantDataArray::get(Context, Elts);
+      } else if (EltTy->isIntegerTy(32)) {
+        SmallVector<uint32_t, 16> Elts(Record.begin(), Record.end());
+        if (isa<VectorType>(CurTy))
+          V = ConstantDataVector::get(Context, Elts);
+        else
+          V = ConstantDataArray::get(Context, Elts);
+      } else if (EltTy->isIntegerTy(64)) {
+        SmallVector<uint64_t, 16> Elts(Record.begin(), Record.end());
+        if (isa<VectorType>(CurTy))
+          V = ConstantDataVector::get(Context, Elts);
+        else
+          V = ConstantDataArray::get(Context, Elts);
+      } else if (EltTy->isHalfTy()) {
+        SmallVector<uint16_t, 16> Elts(Record.begin(), Record.end());
+        if (isa<VectorType>(CurTy))
+          V = ConstantDataVector::getFP(Context, Elts);
+        else
+          V = ConstantDataArray::getFP(Context, Elts);
+      } else if (EltTy->isFloatTy()) {
+        SmallVector<uint32_t, 16> Elts(Record.begin(), Record.end());
+        if (isa<VectorType>(CurTy))
+          V = ConstantDataVector::getFP(Context, Elts);
+        else
+          V = ConstantDataArray::getFP(Context, Elts);
+      } else if (EltTy->isDoubleTy()) {
+        SmallVector<uint64_t, 16> Elts(Record.begin(), Record.end());
+        if (isa<VectorType>(CurTy))
+          V = ConstantDataVector::getFP(Context, Elts);
+        else
+          V = ConstantDataArray::getFP(Context, Elts);
+      } else {
+        return error("Invalid type for value");
+      }
+      break;
+    }
+    case bitc::CST_CODE_CE_BINOP: {  // CE_BINOP: [opcode, opval, opval]
+      if (Record.size() < 3)
+        return error("Invalid record");
+      int Opc = getDecodedBinaryOpcode(Record[0], CurTy);
+      if (Opc < 0) {
+        V = UndefValue::get(CurTy);  // Unknown binop.
+      } else {
+        Constant *LHS = ValueList.getConstantFwdRef(Record[1], CurTy);
+        Constant *RHS = ValueList.getConstantFwdRef(Record[2], CurTy);
+        unsigned Flags = 0;
+        if (Record.size() >= 4) {
+          if (Opc == Instruction::Add ||
+              Opc == Instruction::Sub ||
+              Opc == Instruction::Mul ||
+              Opc == Instruction::Shl) {
+            if (Record[3] & (1 << bitc::OBO_NO_SIGNED_WRAP))
+              Flags |= OverflowingBinaryOperator::NoSignedWrap;
+            if (Record[3] & (1 << bitc::OBO_NO_UNSIGNED_WRAP))
+              Flags |= OverflowingBinaryOperator::NoUnsignedWrap;
+          } else if (Opc == Instruction::SDiv ||
+                     Opc == Instruction::UDiv ||
+                     Opc == Instruction::LShr ||
+                     Opc == Instruction::AShr) {
+            if (Record[3] & (1 << bitc::PEO_EXACT))
+              Flags |= SDivOperator::IsExact;
+          }
+        }
+        V = ConstantExpr::get(Opc, LHS, RHS, Flags);
+      }
+      break;
+    }
+    case bitc::CST_CODE_CE_CAST: {  // CE_CAST: [opcode, opty, opval]
+      if (Record.size() < 3)
+        return error("Invalid record");
+      int Opc = getDecodedCastOpcode(Record[0]);
+      if (Opc < 0) {
+        V = UndefValue::get(CurTy);  // Unknown cast.
+      } else {
+        Type *OpTy = getTypeByID(Record[1]);
+        if (!OpTy)
+          return error("Invalid record");
+        Constant *Op = ValueList.getConstantFwdRef(Record[2], OpTy);
+        V = UpgradeBitCastExpr(Opc, Op, CurTy);
+        if (!V) V = ConstantExpr::getCast(Opc, Op, CurTy);
+      }
+      break;
+    }
+    case bitc::CST_CODE_CE_INBOUNDS_GEP: // [ty, n x operands]
+    case bitc::CST_CODE_CE_GEP: // [ty, n x operands]
+    case bitc::CST_CODE_CE_GEP_WITH_INRANGE_INDEX: { // [ty, flags, n x
+                                                     // operands]
+      unsigned OpNum = 0;
+      Type *PointeeType = nullptr;
+      if (BitCode == bitc::CST_CODE_CE_GEP_WITH_INRANGE_INDEX ||
+          Record.size() % 2)
+        PointeeType = getTypeByID(Record[OpNum++]);
+
+      bool InBounds = false;
+      Optional<unsigned> InRangeIndex;
+      if (BitCode == bitc::CST_CODE_CE_GEP_WITH_INRANGE_INDEX) {
+        uint64_t Op = Record[OpNum++];
+        InBounds = Op & 1;
+        InRangeIndex = Op >> 1;
+      } else if (BitCode == bitc::CST_CODE_CE_INBOUNDS_GEP)
+        InBounds = true;
+
+      SmallVector<Constant*, 16> Elts;
+      while (OpNum != Record.size()) {
+        Type *ElTy = getTypeByID(Record[OpNum++]);
+        if (!ElTy)
+          return error("Invalid record");
+        Elts.push_back(ValueList.getConstantFwdRef(Record[OpNum++], ElTy));
+      }
+
+      if (PointeeType &&
+          PointeeType !=
+              cast<PointerType>(Elts[0]->getType()->getScalarType())
+                  ->getElementType())
+        return error("Explicit gep operator type does not match pointee type "
+                     "of pointer operand");
+
+      if (Elts.size() < 1)
+        return error("Invalid gep with no operands");
+
+      ArrayRef<Constant *> Indices(Elts.begin() + 1, Elts.end());
+      V = ConstantExpr::getGetElementPtr(PointeeType, Elts[0], Indices,
+                                         InBounds, InRangeIndex);
+      break;
+    }
+    case bitc::CST_CODE_CE_SELECT: {  // CE_SELECT: [opval#, opval#, opval#]
+      if (Record.size() < 3)
+        return error("Invalid record");
+
+      Type *SelectorTy = Type::getInt1Ty(Context);
+
+      // The selector might be an i1 or an <n x i1>
+      // Get the type from the ValueList before getting a forward ref.
+      if (VectorType *VTy = dyn_cast<VectorType>(CurTy))
+        if (Value *V = ValueList[Record[0]])
+          if (SelectorTy != V->getType())
+            SelectorTy = VectorType::get(SelectorTy, VTy->getNumElements());
+
+      V = ConstantExpr::getSelect(ValueList.getConstantFwdRef(Record[0],
+                                                              SelectorTy),
+                                  ValueList.getConstantFwdRef(Record[1],CurTy),
+                                  ValueList.getConstantFwdRef(Record[2],CurTy));
+      break;
+    }
+    // CE_EXTRACTELT: [opty, opval, opty, opval]
+    case bitc::CST_CODE_CE_EXTRACTELT: {
+      if (Record.size() < 3)
+        return error("Invalid record");
+      VectorType *OpTy =
+        dyn_cast_or_null<VectorType>(getTypeByID(Record[0]));
+      if (!OpTy)
+        return error("Invalid record");
+      Constant *Op0 = ValueList.getConstantFwdRef(Record[1], OpTy);
+      Constant *Op1 = nullptr;
+      if (Record.size() == 4) {
+        Type *IdxTy = getTypeByID(Record[2]);
+        if (!IdxTy)
+          return error("Invalid record");
+        Op1 = ValueList.getConstantFwdRef(Record[3], IdxTy);
+      } else // TODO: Remove with llvm 4.0
+        Op1 = ValueList.getConstantFwdRef(Record[2], Type::getInt32Ty(Context));
+      if (!Op1)
+        return error("Invalid record");
+      V = ConstantExpr::getExtractElement(Op0, Op1);
+      break;
+    }
+    // CE_INSERTELT: [opval, opval, opty, opval]
+    case bitc::CST_CODE_CE_INSERTELT: {
+      VectorType *OpTy = dyn_cast<VectorType>(CurTy);
+      if (Record.size() < 3 || !OpTy)
+        return error("Invalid record");
+      Constant *Op0 = ValueList.getConstantFwdRef(Record[0], OpTy);
+      Constant *Op1 = ValueList.getConstantFwdRef(Record[1],
+                                                  OpTy->getElementType());
+      Constant *Op2 = nullptr;
+      if (Record.size() == 4) {
+        Type *IdxTy = getTypeByID(Record[2]);
+        if (!IdxTy)
+          return error("Invalid record");
+        Op2 = ValueList.getConstantFwdRef(Record[3], IdxTy);
+      } else // TODO: Remove with llvm 4.0
+        Op2 = ValueList.getConstantFwdRef(Record[2], Type::getInt32Ty(Context));
+      if (!Op2)
+        return error("Invalid record");
+      V = ConstantExpr::getInsertElement(Op0, Op1, Op2);
+      break;
+    }
+    case bitc::CST_CODE_CE_SHUFFLEVEC: { // CE_SHUFFLEVEC: [opval, opval, opval]
+      VectorType *OpTy = dyn_cast<VectorType>(CurTy);
+      if (Record.size() < 3 || !OpTy)
+        return error("Invalid record");
+      Constant *Op0 = ValueList.getConstantFwdRef(Record[0], OpTy);
+      Constant *Op1 = ValueList.getConstantFwdRef(Record[1], OpTy);
+      Type *ShufTy = VectorType::get(Type::getInt32Ty(Context),
+                                                 OpTy->getNumElements());
+      Constant *Op2 = ValueList.getConstantFwdRef(Record[2], ShufTy);
+      V = ConstantExpr::getShuffleVector(Op0, Op1, Op2);
+      break;
+    }
+    case bitc::CST_CODE_CE_SHUFVEC_EX: { // [opty, opval, opval, opval]
+      VectorType *RTy = dyn_cast<VectorType>(CurTy);
+      VectorType *OpTy =
+        dyn_cast_or_null<VectorType>(getTypeByID(Record[0]));
+      if (Record.size() < 4 || !RTy || !OpTy)
+        return error("Invalid record");
+      Constant *Op0 = ValueList.getConstantFwdRef(Record[1], OpTy);
+      Constant *Op1 = ValueList.getConstantFwdRef(Record[2], OpTy);
+      Type *ShufTy = VectorType::get(Type::getInt32Ty(Context),
+                                                 RTy->getNumElements());
+      Constant *Op2 = ValueList.getConstantFwdRef(Record[3], ShufTy);
+      V = ConstantExpr::getShuffleVector(Op0, Op1, Op2);
+      break;
+    }
+    case bitc::CST_CODE_CE_CMP: {     // CE_CMP: [opty, opval, opval, pred]
+      if (Record.size() < 4)
+        return error("Invalid record");
+      Type *OpTy = getTypeByID(Record[0]);
+      if (!OpTy)
+        return error("Invalid record");
+      Constant *Op0 = ValueList.getConstantFwdRef(Record[1], OpTy);
+      Constant *Op1 = ValueList.getConstantFwdRef(Record[2], OpTy);
+
+      if (OpTy->isFPOrFPVectorTy())
+        V = ConstantExpr::getFCmp(Record[3], Op0, Op1);
+      else
+        V = ConstantExpr::getICmp(Record[3], Op0, Op1);
+      break;
+    }
+    // This maintains backward compatibility with pre-asm-dialect keywords.
+    // FIXME: Remove with the 4.0 release.
+    case bitc::CST_CODE_INLINEASM_OLD: {
+      if (Record.size() < 2)
+        return error("Invalid record");
+      std::string AsmStr, ConstrStr;
+      bool HasSideEffects = Record[0] & 1;
+      bool IsAlignStack = Record[0] >> 1;
+      unsigned AsmStrSize = Record[1];
+      if (2+AsmStrSize >= Record.size())
+        return error("Invalid record");
+      unsigned ConstStrSize = Record[2+AsmStrSize];
+      if (3+AsmStrSize+ConstStrSize > Record.size())
+        return error("Invalid record");
+
+      for (unsigned i = 0; i != AsmStrSize; ++i)
+        AsmStr += (char)Record[2+i];
+      for (unsigned i = 0; i != ConstStrSize; ++i)
+        ConstrStr += (char)Record[3+AsmStrSize+i];
+      PointerType *PTy = cast<PointerType>(CurTy);
+      V = InlineAsm::get(cast<FunctionType>(PTy->getElementType()),
+                         AsmStr, ConstrStr, HasSideEffects, IsAlignStack);
+      break;
+    }
+    // This version adds support for the asm dialect keywords (e.g.,
+    // inteldialect).
+    case bitc::CST_CODE_INLINEASM: {
+      if (Record.size() < 2)
+        return error("Invalid record");
+      std::string AsmStr, ConstrStr;
+      bool HasSideEffects = Record[0] & 1;
+      bool IsAlignStack = (Record[0] >> 1) & 1;
+      unsigned AsmDialect = Record[0] >> 2;
+      unsigned AsmStrSize = Record[1];
+      if (2+AsmStrSize >= Record.size())
+        return error("Invalid record");
+      unsigned ConstStrSize = Record[2+AsmStrSize];
+      if (3+AsmStrSize+ConstStrSize > Record.size())
+        return error("Invalid record");
+
+      for (unsigned i = 0; i != AsmStrSize; ++i)
+        AsmStr += (char)Record[2+i];
+      for (unsigned i = 0; i != ConstStrSize; ++i)
+        ConstrStr += (char)Record[3+AsmStrSize+i];
+      PointerType *PTy = cast<PointerType>(CurTy);
+      V = InlineAsm::get(cast<FunctionType>(PTy->getElementType()),
+                         AsmStr, ConstrStr, HasSideEffects, IsAlignStack,
+                         InlineAsm::AsmDialect(AsmDialect));
+      break;
+    }
+    case bitc::CST_CODE_BLOCKADDRESS:{
+      if (Record.size() < 3)
+        return error("Invalid record");
+      Type *FnTy = getTypeByID(Record[0]);
+      if (!FnTy)
+        return error("Invalid record");
+      Function *Fn =
+        dyn_cast_or_null<Function>(ValueList.getConstantFwdRef(Record[1],FnTy));
+      if (!Fn)
+        return error("Invalid record");
+
+      // If the function is already parsed we can insert the block address right
+      // away.
+      BasicBlock *BB;
+      unsigned BBID = Record[2];
+      if (!BBID)
+        // Invalid reference to entry block.
+        return error("Invalid ID");
+      if (!Fn->empty()) {
+        Function::iterator BBI = Fn->begin(), BBE = Fn->end();
+        for (size_t I = 0, E = BBID; I != E; ++I) {
+          if (BBI == BBE)
+            return error("Invalid ID");
+          ++BBI;
+        }
+        BB = &*BBI;
+      } else {
+        // Otherwise insert a placeholder and remember it so it can be inserted
+        // when the function is parsed.
+        auto &FwdBBs = BasicBlockFwdRefs[Fn];
+        if (FwdBBs.empty())
+          BasicBlockFwdRefQueue.push_back(Fn);
+        if (FwdBBs.size() < BBID + 1)
+          FwdBBs.resize(BBID + 1);
+        if (!FwdBBs[BBID])
+          FwdBBs[BBID] = BasicBlock::Create(Context);
+        BB = FwdBBs[BBID];
+      }
+      V = BlockAddress::get(Fn, BB);
+      break;
+    }
+    }
+
+    ValueList.assignValue(V, NextCstNo);
+    ++NextCstNo;
+  }
+}
+
+Error BitcodeReader::parseUseLists() {
+  if (Stream.EnterSubBlock(bitc::USELIST_BLOCK_ID))
+    return error("Invalid record");
+
+  // Read all the records.
+  SmallVector<uint64_t, 64> Record;
+
+  while (true) {
+    BitstreamEntry Entry = Stream.advanceSkippingSubblocks();
+
+    switch (Entry.Kind) {
+    case BitstreamEntry::SubBlock: // Handled for us already.
+    case BitstreamEntry::Error:
+      return error("Malformed block");
+    case BitstreamEntry::EndBlock:
+      return Error::success();
+    case BitstreamEntry::Record:
+      // The interesting case.
+      break;
+    }
+
+    // Read a use list record.
+    Record.clear();
+    bool IsBB = false;
+    switch (Stream.readRecord(Entry.ID, Record)) {
+    default:  // Default behavior: unknown type.
+      break;
+    case bitc::USELIST_CODE_BB:
+      IsBB = true;
+      LLVM_FALLTHROUGH;
+    case bitc::USELIST_CODE_DEFAULT: {
+      unsigned RecordLength = Record.size();
+      if (RecordLength < 3)
+        // Records should have at least an ID and two indexes.
+        return error("Invalid record");
+      unsigned ID = Record.back();
+      Record.pop_back();
+
+      Value *V;
+      if (IsBB) {
+        assert(ID < FunctionBBs.size() && "Basic block not found");
+        V = FunctionBBs[ID];
+      } else
+        V = ValueList[ID];
+      unsigned NumUses = 0;
+      SmallDenseMap<const Use *, unsigned, 16> Order;
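+      // Each record entry gives the desired position of the corresponding
+      // use; remember them so the use list can be sorted into that order
+      // below.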
+      for (const Use &U : V->materialized_uses()) {
+        if (++NumUses > Record.size())
+          break;
+        Order[&U] = Record[NumUses - 1];
+      }
+      if (Order.size() != Record.size() || NumUses > Record.size())
+        // Mismatches can happen if the functions are being materialized lazily
+        // (out-of-order), or a value has been upgraded.
+        break;
+
+      V->sortUseList([&](const Use &L, const Use &R) {
+        return Order.lookup(&L) < Order.lookup(&R);
+      });
+      break;
+    }
+    }
+  }
+}
+
+/// When we see the block for metadata, remember where it is and then skip it.
+/// This lets us lazily deserialize the metadata.
+Error BitcodeReader::rememberAndSkipMetadata() {
+  // Save the current stream state.
+  uint64_t CurBit = Stream.GetCurrentBitNo();
+  DeferredMetadataInfo.push_back(CurBit);
+
+  // Skip over the block for now.
+  if (Stream.SkipBlock())
+    return error("Invalid record");
+  return Error::success();
+}
+
+Error BitcodeReader::materializeMetadata() {
+  for (uint64_t BitPos : DeferredMetadataInfo) {
+    // Move the bit stream to the saved position.
+    Stream.JumpToBit(BitPos);
+    if (Error Err = MDLoader->parseModuleMetadata())
+      return Err;
+  }
+  DeferredMetadataInfo.clear();
+  return Error::success();
+}
+
+void BitcodeReader::setStripDebugInfo() { StripDebugInfo = true; }
+
+/// When we see the block for a function body, remember where it is and then
+/// skip it.  This lets us lazily deserialize the functions.
+Error BitcodeReader::rememberAndSkipFunctionBody() {
+  // Get the function we are talking about.
+  if (FunctionsWithBodies.empty())
+    return error("Insufficient function protos");
+
+  Function *Fn = FunctionsWithBodies.back();
+  FunctionsWithBodies.pop_back();
+
+  // Save the current stream state.
+  uint64_t CurBit = Stream.GetCurrentBitNo();
+  assert(
+      (DeferredFunctionInfo[Fn] == 0 || DeferredFunctionInfo[Fn] == CurBit) &&
+      "Mismatch between VST and scanned function offsets");
+  DeferredFunctionInfo[Fn] = CurBit;
+
+  // Skip over the function block for now.
+  if (Stream.SkipBlock())
+    return error("Invalid record");
+  return Error::success();
+}
+
+Error BitcodeReader::globalCleanup() {
+  // Patch the initializers for globals and aliases up.
+  if (Error Err = resolveGlobalAndIndirectSymbolInits())
+    return Err;
+  if (!GlobalInits.empty() || !IndirectSymbolInits.empty())
+    return error("Malformed global initializer set");
+
+  // Look for intrinsic functions which need to be upgraded at some point
+  for (Function &F : *TheModule) {
+    Function *NewFn;
+    if (UpgradeIntrinsicFunction(&F, NewFn))
+      UpgradedIntrinsics[&F] = NewFn;
+    else if (auto Remangled = Intrinsic::remangleIntrinsicFunction(&F))
+      // Some types could be renamed during loading if several modules are
+      // loaded in the same LLVMContext (LTO scenario). In this case we should
+      // remangle intrinsics names as well.
+      RemangledIntrinsics[&F] = Remangled.getValue();
+  }
+
+  // Look for global variables which need to be renamed.
+  for (GlobalVariable &GV : TheModule->globals())
+    UpgradeGlobalVariable(&GV);
+
+  // Force deallocation of memory for these vectors to favor clients that
+  // want lazy deserialization.
+  std::vector<std::pair<GlobalVariable*, unsigned> >().swap(GlobalInits);
+  std::vector<std::pair<GlobalIndirectSymbol*, unsigned> >().swap(
+      IndirectSymbolInits);
+  return Error::success();
+}
+
+/// Support for lazy parsing of function bodies. This is required if we
+/// either have an old bitcode file without a VST forward declaration record,
+/// or if we have an anonymous function being materialized, since anonymous
+/// functions do not have a name and are therefore not in the VST.
+Error BitcodeReader::rememberAndSkipFunctionBodies() {
+  Stream.JumpToBit(NextUnreadBit);
+
+  if (Stream.AtEndOfStream())
+    return error("Could not find function in stream");
+
+  if (!SeenFirstFunctionBody)
+    return error("Trying to materialize functions before seeing function blocks");
+
+  // An old bitcode file with the symbol table at the end would have
+  // finished the parse greedily.
+  assert(SeenValueSymbolTable);
+
+  SmallVector<uint64_t, 64> Record;
+
+  while (true) {
+    BitstreamEntry Entry = Stream.advance();
+    switch (Entry.Kind) {
+    default:
+      return error("Expect SubBlock");
+    case BitstreamEntry::SubBlock:
+      switch (Entry.ID) {
+      default:
+        return error("Expect function block");
+      case bitc::FUNCTION_BLOCK_ID:
+        if (Error Err = rememberAndSkipFunctionBody())
+          return Err;
+        NextUnreadBit = Stream.GetCurrentBitNo();
+        return Error::success();
+      }
+    }
+  }
+}
+
+bool BitcodeReaderBase::readBlockInfo() {
+  Optional<BitstreamBlockInfo> NewBlockInfo = Stream.ReadBlockInfoBlock();
+  if (!NewBlockInfo)
+    return true;
+  BlockInfo = std::move(*NewBlockInfo);
+  return false;
+}
+
+Error BitcodeReader::parseModule(uint64_t ResumeBit,
+                                 bool ShouldLazyLoadMetadata) {
+  if (ResumeBit)
+    Stream.JumpToBit(ResumeBit);
+  else if (Stream.EnterSubBlock(bitc::MODULE_BLOCK_ID))
+    return error("Invalid record");
+
+  SmallVector<uint64_t, 64> Record;
+  std::vector<std::string> SectionTable;
+  std::vector<std::string> GCTable;
+
+  // Read all the records for this module.
+  while (true) {
+    BitstreamEntry Entry = Stream.advance();
+
+    switch (Entry.Kind) {
+    case BitstreamEntry::Error:
+      return error("Malformed block");
+    case BitstreamEntry::EndBlock:
+      return globalCleanup();
+
+    case BitstreamEntry::SubBlock:
+      switch (Entry.ID) {
+      default:  // Skip unknown content.
+        if (Stream.SkipBlock())
+          return error("Invalid record");
+        break;
+      case bitc::BLOCKINFO_BLOCK_ID:
+        if (readBlockInfo())
+          return error("Malformed block");
+        break;
+      case bitc::PARAMATTR_BLOCK_ID:
+        if (Error Err = parseAttributeBlock())
+          return Err;
+        break;
+      case bitc::PARAMATTR_GROUP_BLOCK_ID:
+        if (Error Err = parseAttributeGroupBlock())
+          return Err;
+        break;
+      case bitc::TYPE_BLOCK_ID_NEW:
+        if (Error Err = parseTypeTable())
+          return Err;
+        break;
+      case bitc::VALUE_SYMTAB_BLOCK_ID:
+        if (!SeenValueSymbolTable) {
+          // Either this is an old form VST without function index and an
+          // associated VST forward declaration record (which would have caused
+          // the VST to be jumped to and parsed before it was encountered
+          // normally in the stream), or there were no function blocks to
+          // trigger an earlier parsing of the VST.
+          assert(VSTOffset == 0 || FunctionsWithBodies.empty());
+          if (Error Err = parseValueSymbolTable())
+            return Err;
+          SeenValueSymbolTable = true;
+        } else {
+          // We must have had a VST forward declaration record, which caused
+          // the parser to jump to and parse the VST earlier.
+          assert(VSTOffset > 0);
+          if (Stream.SkipBlock())
+            return error("Invalid record");
+        }
+        break;
+      case bitc::CONSTANTS_BLOCK_ID:
+        if (Error Err = parseConstants())
+          return Err;
+        if (Error Err = resolveGlobalAndIndirectSymbolInits())
+          return Err;
+        break;
+      case bitc::METADATA_BLOCK_ID:
+        if (ShouldLazyLoadMetadata) {
+          if (Error Err = rememberAndSkipMetadata())
+            return Err;
+          break;
+        }
+        assert(DeferredMetadataInfo.empty() && "Unexpected deferred metadata");
+        if (Error Err = MDLoader->parseModuleMetadata())
+          return Err;
+        break;
+      case bitc::METADATA_KIND_BLOCK_ID:
+        if (Error Err = MDLoader->parseMetadataKinds())
+          return Err;
+        break;
+      case bitc::FUNCTION_BLOCK_ID:
+        // If this is the first function body we've seen, reverse the
+        // FunctionsWithBodies list.
+        if (!SeenFirstFunctionBody) {
+          std::reverse(FunctionsWithBodies.begin(), FunctionsWithBodies.end());
+          if (Error Err = globalCleanup())
+            return Err;
+          SeenFirstFunctionBody = true;
+        }
+
+        if (VSTOffset > 0) {
+          // If we have a VST forward declaration record, make sure we
+          // parse the VST now if we haven't already. It is needed to
+          // set up the DeferredFunctionInfo vector for lazy reading.
+          if (!SeenValueSymbolTable) {
+            if (Error Err = BitcodeReader::parseValueSymbolTable(VSTOffset))
+              return Err;
+            SeenValueSymbolTable = true;
+            // Fall through so that we record the NextUnreadBit below.
+            // This is necessary in case we have an anonymous function that
+            // is later materialized. Since it will not have a VST entry we
+            // need to fall back to the lazy parse to find its offset.
+          } else {
+            // If we have a VST forward declaration record, but have already
+            // parsed the VST (just above, when the first function body was
+            // encountered here), then we are resuming the parse after
+            // materializing functions. The ResumeBit points to the
+            // start of the last function block recorded in the
+            // DeferredFunctionInfo map. Skip it.
+            if (Stream.SkipBlock())
+              return error("Invalid record");
+            continue;
+          }
+        }
+
+        // Support older bitcode files that did not have the function
+        // index in the VST, nor a VST forward declaration record, as
+        // well as anonymous functions that do not have VST entries.
+        // Build the DeferredFunctionInfo vector on the fly.
+        if (Error Err = rememberAndSkipFunctionBody())
+          return Err;
+
+        // Suspend parsing when we reach the function bodies. Subsequent
+        // materialization calls will resume it when necessary. If the bitcode
+        // file is old, the symbol table will be at the end instead and will not
+        // have been seen yet. In this case, just finish the parse now.
+        if (SeenValueSymbolTable) {
+          NextUnreadBit = Stream.GetCurrentBitNo();
+          // After the VST has been parsed, we need to make sure intrinsic name
+          // are auto-upgraded.
+          return globalCleanup();
+        }
+        break;
+      case bitc::USELIST_BLOCK_ID:
+        if (Error Err = parseUseLists())
+          return Err;
+        break;
+      case bitc::OPERAND_BUNDLE_TAGS_BLOCK_ID:
+        if (Error Err = parseOperandBundleTags())
+          return Err;
+        break;
+      }
+      continue;
+
+    case BitstreamEntry::Record:
+      // The interesting case.
+      break;
+    }
+
+    // Read a record.
+    auto BitCode = Stream.readRecord(Entry.ID, Record);
+    switch (BitCode) {
+    default: break;  // Default behavior, ignore unknown content.
+    case bitc::MODULE_CODE_VERSION: {  // VERSION: [version#]
+      if (Record.size() < 1)
+        return error("Invalid record");
+      // Only version #0 and #1 are supported so far.
+      unsigned module_version = Record[0];
+      switch (module_version) {
+        default:
+          return error("Invalid value");
+        case 0:
+          UseRelativeIDs = false;
+          break;
+        case 1:
+          UseRelativeIDs = true;
+          break;
+      }
+      break;
+    }
+    case bitc::MODULE_CODE_TRIPLE: {  // TRIPLE: [strchr x N]
+      std::string S;
+      if (convertToString(Record, 0, S))
+        return error("Invalid record");
+      TheModule->setTargetTriple(S);
+      break;
+    }
+    case bitc::MODULE_CODE_DATALAYOUT: {  // DATALAYOUT: [strchr x N]
+      std::string S;
+      if (convertToString(Record, 0, S))
+        return error("Invalid record");
+      TheModule->setDataLayout(S);
+      break;
+    }
+    case bitc::MODULE_CODE_ASM: {  // ASM: [strchr x N]
+      std::string S;
+      if (convertToString(Record, 0, S))
+        return error("Invalid record");
+      TheModule->setModuleInlineAsm(S);
+      break;
+    }
+    case bitc::MODULE_CODE_DEPLIB: {  // DEPLIB: [strchr x N]
+      // FIXME: Remove in 4.0.
+      std::string S;
+      if (convertToString(Record, 0, S))
+        return error("Invalid record");
+      // Ignore value.
+      break;
+    }
+    case bitc::MODULE_CODE_SECTIONNAME: {  // SECTIONNAME: [strchr x N]
+      std::string S;
+      if (convertToString(Record, 0, S))
+        return error("Invalid record");
+      SectionTable.push_back(S);
+      break;
+    }
+    case bitc::MODULE_CODE_GCNAME: {  // GCNAME: [strchr x N]
+      std::string S;
+      if (convertToString(Record, 0, S))
+        return error("Invalid record");
+      GCTable.push_back(S);
+      break;
+    }
+    case bitc::MODULE_CODE_COMDAT: { // COMDAT: [selection_kind, name]
+      if (Record.size() < 2)
+        return error("Invalid record");
+      Comdat::SelectionKind SK = getDecodedComdatSelectionKind(Record[0]);
+      unsigned ComdatNameSize = Record[1];
+      std::string ComdatName;
+      ComdatName.reserve(ComdatNameSize);
+      for (unsigned i = 0; i != ComdatNameSize; ++i)
+        ComdatName += (char)Record[2 + i];
+      Comdat *C = TheModule->getOrInsertComdat(ComdatName);
+      C->setSelectionKind(SK);
+      ComdatList.push_back(C);
+      break;
+    }
+    // GLOBALVAR: [pointer type, isconst, initid,
+    //             linkage, alignment, section, visibility, threadlocal,
+    //             unnamed_addr, externally_initialized, dllstorageclass,
+    //             comdat]
+    case bitc::MODULE_CODE_GLOBALVAR: {
+      if (Record.size() < 6)
+        return error("Invalid record");
+      Type *Ty = getTypeByID(Record[0]);
+      if (!Ty)
+        return error("Invalid record");
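+      // Record[1] packs three fields: bit 0 is the isconstant flag, bit 1
+      // says whether Ty is the global's value type (with the address space
+      // in the remaining bits) or a pointer type from which both the value
+      // type and the address space are derived.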
+      bool isConstant = Record[1] & 1;
+      bool explicitType = Record[1] & 2;
+      unsigned AddressSpace;
+      if (explicitType) {
+        AddressSpace = Record[1] >> 2;
+      } else {
+        if (!Ty->isPointerTy())
+          return error("Invalid type for value");
+        AddressSpace = cast<PointerType>(Ty)->getAddressSpace();
+        Ty = cast<PointerType>(Ty)->getElementType();
+      }
+
+      uint64_t RawLinkage = Record[3];
+      GlobalValue::LinkageTypes Linkage = getDecodedLinkage(RawLinkage);
+      unsigned Alignment;
+      if (Error Err = parseAlignmentValue(Record[4], Alignment))
+        return Err;
+      std::string Section;
+      if (Record[5]) {
+        if (Record[5]-1 >= SectionTable.size())
+          return error("Invalid ID");
+        Section = SectionTable[Record[5]-1];
+      }
+      GlobalValue::VisibilityTypes Visibility = GlobalValue::DefaultVisibility;
+      // Local linkage must have default visibility.
+      if (Record.size() > 6 && !GlobalValue::isLocalLinkage(Linkage))
+        // FIXME: Change to an error if non-default in 4.0.
+        Visibility = getDecodedVisibility(Record[6]);
+
+      GlobalVariable::ThreadLocalMode TLM = GlobalVariable::NotThreadLocal;
+      if (Record.size() > 7)
+        TLM = getDecodedThreadLocalMode(Record[7]);
+
+      GlobalValue::UnnamedAddr UnnamedAddr = GlobalValue::UnnamedAddr::None;
+      if (Record.size() > 8)
+        UnnamedAddr = getDecodedUnnamedAddrType(Record[8]);
+
+      bool ExternallyInitialized = false;
+      if (Record.size() > 9)
+        ExternallyInitialized = Record[9];
+
+      GlobalVariable *NewGV =
+        new GlobalVariable(*TheModule, Ty, isConstant, Linkage, nullptr, "", nullptr,
+                           TLM, AddressSpace, ExternallyInitialized);
+      NewGV->setAlignment(Alignment);
+      if (!Section.empty())
+        NewGV->setSection(Section);
+      NewGV->setVisibility(Visibility);
+      NewGV->setUnnamedAddr(UnnamedAddr);
+
+      if (Record.size() > 10)
+        NewGV->setDLLStorageClass(getDecodedDLLStorageClass(Record[10]));
+      else
+        upgradeDLLImportExportLinkage(NewGV, RawLinkage);
+
+      ValueList.push_back(NewGV);
+
+      // Remember which value to use for the global initializer.
+      if (unsigned InitID = Record[2])
+        GlobalInits.push_back(std::make_pair(NewGV, InitID-1));
+
+      if (Record.size() > 11) {
+        if (unsigned ComdatID = Record[11]) {
+          if (ComdatID > ComdatList.size())
+            return error("Invalid global variable comdat ID");
+          NewGV->setComdat(ComdatList[ComdatID - 1]);
+        }
+      } else if (hasImplicitComdat(RawLinkage)) {
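+        // Mark with the placeholder comdat; recordValue() replaces it with a
+        // real comdat named after the global once its VST entry is read.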
+        NewGV->setComdat(reinterpret_cast<Comdat *>(1));
+      }
+
+      break;
+    }
+    // FUNCTION:  [type, callingconv, isproto, linkage, paramattr,
+    //             alignment, section, visibility, gc, unnamed_addr,
+    //             prologuedata, dllstorageclass, comdat, prefixdata]
+    case bitc::MODULE_CODE_FUNCTION: {
+      if (Record.size() < 8)
+        return error("Invalid record");
+      Type *Ty = getTypeByID(Record[0]);
+      if (!Ty)
+        return error("Invalid record");
+      if (auto *PTy = dyn_cast<PointerType>(Ty))
+        Ty = PTy->getElementType();
+      auto *FTy = dyn_cast<FunctionType>(Ty);
+      if (!FTy)
+        return error("Invalid type for value");
+      auto CC = static_cast<CallingConv::ID>(Record[1]);
+      if (CC & ~CallingConv::MaxID)
+        return error("Invalid calling convention ID");
+
+      Function *Func = Function::Create(FTy, GlobalValue::ExternalLinkage,
+                                        "", TheModule);
+
+      Func->setCallingConv(CC);
+      bool isProto = Record[2];
+      uint64_t RawLinkage = Record[3];
+      Func->setLinkage(getDecodedLinkage(RawLinkage));
+      Func->setAttributes(getAttributes(Record[4]));
+
+      unsigned Alignment;
+      if (Error Err = parseAlignmentValue(Record[5], Alignment))
+        return Err;
+      Func->setAlignment(Alignment);
+      if (Record[6]) {
+        if (Record[6]-1 >= SectionTable.size())
+          return error("Invalid ID");
+        Func->setSection(SectionTable[Record[6]-1]);
+      }
+      // Local linkage must have default visibility.
+      if (!Func->hasLocalLinkage())
+        // FIXME: Change to an error if non-default in 4.0.
+        Func->setVisibility(getDecodedVisibility(Record[7]));
+      if (Record.size() > 8 && Record[8]) {
+        if (Record[8]-1 >= GCTable.size())
+          return error("Invalid ID");
+        Func->setGC(GCTable[Record[8] - 1]);
+      }
+      GlobalValue::UnnamedAddr UnnamedAddr = GlobalValue::UnnamedAddr::None;
+      if (Record.size() > 9)
+        UnnamedAddr = getDecodedUnnamedAddrType(Record[9]);
+      Func->setUnnamedAddr(UnnamedAddr);
+      if (Record.size() > 10 && Record[10] != 0)
+        FunctionPrologues.push_back(std::make_pair(Func, Record[10]-1));
+
+      if (Record.size() > 11)
+        Func->setDLLStorageClass(getDecodedDLLStorageClass(Record[11]));
+      else
+        upgradeDLLImportExportLinkage(Func, RawLinkage);
+
+      if (Record.size() > 12) {
+        if (unsigned ComdatID = Record[12]) {
+          if (ComdatID > ComdatList.size())
+            return error("Invalid function comdat ID");
+          Func->setComdat(ComdatList[ComdatID - 1]);
+        }
+      } else if (hasImplicitComdat(RawLinkage)) {
+        Func->setComdat(reinterpret_cast<Comdat *>(1));
+      }
+
+      if (Record.size() > 13 && Record[13] != 0)
+        FunctionPrefixes.push_back(std::make_pair(Func, Record[13]-1));
+
+      if (Record.size() > 14 && Record[14] != 0)
+        FunctionPersonalityFns.push_back(std::make_pair(Func, Record[14] - 1));
+
+      ValueList.push_back(Func);
+
+      // If this is a function with a body, remember the prototype we are
+      // creating now, so that we can match up the body with them later.
+      if (!isProto) {
+        Func->setIsMaterializable(true);
+        FunctionsWithBodies.push_back(Func);
+        DeferredFunctionInfo[Func] = 0;
+      }
+      break;
+    }
+    // ALIAS: [alias type, addrspace, aliasee val#, linkage]
+    // ALIAS: [alias type, addrspace, aliasee val#, linkage, visibility, dllstorageclass]
+    // IFUNC: [alias type, addrspace, aliasee val#, linkage, visibility, dllstorageclass]
+    case bitc::MODULE_CODE_IFUNC:
+    case bitc::MODULE_CODE_ALIAS:
+    case bitc::MODULE_CODE_ALIAS_OLD: {
+      bool NewRecord = BitCode != bitc::MODULE_CODE_ALIAS_OLD;
+      if (Record.size() < (3 + (unsigned)NewRecord))
+        return error("Invalid record");
+      unsigned OpNum = 0;
+      Type *Ty = getTypeByID(Record[OpNum++]);
+      if (!Ty)
+        return error("Invalid record");
+
+      unsigned AddrSpace;
+      if (!NewRecord) {
+        auto *PTy = dyn_cast<PointerType>(Ty);
+        if (!PTy)
+          return error("Invalid type for value");
+        Ty = PTy->getElementType();
+        AddrSpace = PTy->getAddressSpace();
+      } else {
+        AddrSpace = Record[OpNum++];
+      }
+
+      auto Val = Record[OpNum++];
+      auto Linkage = Record[OpNum++];
+      GlobalIndirectSymbol *NewGA;
+      if (BitCode == bitc::MODULE_CODE_ALIAS ||
+          BitCode == bitc::MODULE_CODE_ALIAS_OLD)
+        NewGA = GlobalAlias::create(Ty, AddrSpace, getDecodedLinkage(Linkage),
+                                    "", TheModule);
+      else
+        NewGA = GlobalIFunc::create(Ty, AddrSpace, getDecodedLinkage(Linkage),
+                                    "", nullptr, TheModule);
+      // Old bitcode files didn't have visibility field.
+      // Local linkage must have default visibility.
+      if (OpNum != Record.size()) {
+        auto VisInd = OpNum++;
+        if (!NewGA->hasLocalLinkage())
+          // FIXME: Change to an error if non-default in 4.0.
+          NewGA->setVisibility(getDecodedVisibility(Record[VisInd]));
+      }
+      if (OpNum != Record.size())
+        NewGA->setDLLStorageClass(getDecodedDLLStorageClass(Record[OpNum++]));
+      else
+        upgradeDLLImportExportLinkage(NewGA, Linkage);
+      if (OpNum != Record.size())
+        NewGA->setThreadLocalMode(getDecodedThreadLocalMode(Record[OpNum++]));
+      if (OpNum != Record.size())
+        NewGA->setUnnamedAddr(getDecodedUnnamedAddrType(Record[OpNum++]));
+      ValueList.push_back(NewGA);
+      IndirectSymbolInits.push_back(std::make_pair(NewGA, Val));
+      break;
+    }
+    /// MODULE_CODE_PURGEVALS: [numvals]
+    case bitc::MODULE_CODE_PURGEVALS:
+      // Trim down the value list to the specified size.
+      if (Record.size() < 1 || Record[0] > ValueList.size())
+        return error("Invalid record");
+      ValueList.shrinkTo(Record[0]);
+      break;
+    /// MODULE_CODE_VSTOFFSET: [offset]
+    case bitc::MODULE_CODE_VSTOFFSET:
+      if (Record.size() < 1)
+        return error("Invalid record");
+      // Note that we subtract 1 here because the offset is relative to one word
+      // before the start of the identification or module block, which was
+      // historically always the start of the regular bitcode header.
+      VSTOffset = Record[0] - 1;
+      break;
+    /// MODULE_CODE_SOURCE_FILENAME: [namechar x N]
+    case bitc::MODULE_CODE_SOURCE_FILENAME:
+      SmallString<128> ValueName;
+      if (convertToString(Record, 0, ValueName))
+        return error("Invalid record");
+      TheModule->setSourceFileName(ValueName);
+      break;
+    }
+    Record.clear();
+  }
+}
+
+Error BitcodeReader::parseBitcodeInto(Module *M, bool ShouldLazyLoadMetadata,
+                                      bool IsImporting) {
+  TheModule = M;
+  MDLoader = MetadataLoader(Stream, *M, ValueList, IsImporting,
+                            [&](unsigned ID) { return getTypeByID(ID); });
+  return parseModule(0, ShouldLazyLoadMetadata);
+}
+
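+/// Check that an explicit load/store type (if present) matches the pointee
+/// type of the pointer operand, and that the pointee type can actually be
+/// loaded from or stored to.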
+Error BitcodeReader::typeCheckLoadStoreInst(Type *ValType, Type *PtrType) {
+  if (!isa<PointerType>(PtrType))
+    return error("Load/Store operand is not a pointer type");
+  Type *ElemType = cast<PointerType>(PtrType)->getElementType();
+
+  if (ValType && ValType != ElemType)
+    return error("Explicit load/store type does not match pointee "
+                 "type of pointer operand");
+  if (!PointerType::isLoadableOrStorableType(ElemType))
+    return error("Cannot load/store from pointer");
+  return Error::success();
+}
+
+/// Lazily parse the specified function body block.
+Error BitcodeReader::parseFunctionBody(Function *F) {
+  if (Stream.EnterSubBlock(bitc::FUNCTION_BLOCK_ID))
+    return error("Invalid record");
+
+  // Unexpected unresolved metadata when parsing function.
+  if (MDLoader->hasFwdRefs())
+    return error("Invalid function metadata: incoming forward references");
+
+  InstructionList.clear();
+  unsigned ModuleValueListSize = ValueList.size();
+  unsigned ModuleMDLoaderSize = MDLoader->size();
+
+  // Add all the function arguments to the value table.
+  for (Argument &I : F->args())
+    ValueList.push_back(&I);
+
+  unsigned NextValueNo = ValueList.size();
+  BasicBlock *CurBB = nullptr;
+  unsigned CurBBNo = 0;
+
+  DebugLoc LastLoc;
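+  // Returns the most recently parsed instruction, looking first in the
+  // current block and then in the previously filled block; used by the
+  // DEBUG_LOC records below to attach source locations after the fact.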
+  auto getLastInstruction = [&]() -> Instruction * {
+    if (CurBB && !CurBB->empty())
+      return &CurBB->back();
+    else if (CurBBNo && FunctionBBs[CurBBNo - 1] &&
+             !FunctionBBs[CurBBNo - 1]->empty())
+      return &FunctionBBs[CurBBNo - 1]->back();
+    return nullptr;
+  };
+
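+  // Operand bundles are read ahead of the call/invoke record that consumes
+  // them (see FUNC_CODE_OPERAND_BUNDLE below).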
+  std::vector<OperandBundleDef> OperandBundles;
+
+  // Read all the records.
+  SmallVector<uint64_t, 64> Record;
+
+  while (true) {
+    BitstreamEntry Entry = Stream.advance();
+
+    switch (Entry.Kind) {
+    case BitstreamEntry::Error:
+      return error("Malformed block");
+    case BitstreamEntry::EndBlock:
+      goto OutOfRecordLoop;
+
+    case BitstreamEntry::SubBlock:
+      switch (Entry.ID) {
+      default:  // Skip unknown content.
+        if (Stream.SkipBlock())
+          return error("Invalid record");
+        break;
+      case bitc::CONSTANTS_BLOCK_ID:
+        if (Error Err = parseConstants())
+          return Err;
+        NextValueNo = ValueList.size();
+        break;
+      case bitc::VALUE_SYMTAB_BLOCK_ID:
+        if (Error Err = parseValueSymbolTable())
+          return Err;
+        break;
+      case bitc::METADATA_ATTACHMENT_ID:
+        if (Error Err = MDLoader->parseMetadataAttachment(*F, InstructionList))
+          return Err;
+        break;
+      case bitc::METADATA_BLOCK_ID:
+        assert(DeferredMetadataInfo.empty() &&
+               "Must read all module-level metadata before function-level");
+        if (Error Err = MDLoader->parseFunctionMetadata())
+          return Err;
+        break;
+      case bitc::USELIST_BLOCK_ID:
+        if (Error Err = parseUseLists())
+          return Err;
+        break;
+      }
+      continue;
+
+    case BitstreamEntry::Record:
+      // The interesting case.
+      break;
+    }
+
+    // Read a record.
+    Record.clear();
+    Instruction *I = nullptr;
+    unsigned BitCode = Stream.readRecord(Entry.ID, Record);
+    switch (BitCode) {
+    default: // Default behavior: reject
+      return error("Invalid value");
+    case bitc::FUNC_CODE_DECLAREBLOCKS: {   // DECLAREBLOCKS: [nblocks]
+      if (Record.size() < 1 || Record[0] == 0)
+        return error("Invalid record");
+      // Create all the basic blocks for the function.
+      FunctionBBs.resize(Record[0]);
+
+      // See if anything took the address of blocks in this function.
+      auto BBFRI = BasicBlockFwdRefs.find(F);
+      if (BBFRI == BasicBlockFwdRefs.end()) {
+        for (unsigned i = 0, e = FunctionBBs.size(); i != e; ++i)
+          FunctionBBs[i] = BasicBlock::Create(Context, "", F);
+      } else {
+        auto &BBRefs = BBFRI->second;
+        // Check for invalid basic block references.
+        if (BBRefs.size() > FunctionBBs.size())
+          return error("Invalid ID");
+        assert(!BBRefs.empty() && "Unexpected empty array");
+        assert(!BBRefs.front() && "Invalid reference to entry block");
+        for (unsigned I = 0, E = FunctionBBs.size(), RE = BBRefs.size(); I != E;
+             ++I)
+          if (I < RE && BBRefs[I]) {
+            BBRefs[I]->insertInto(F);
+            FunctionBBs[I] = BBRefs[I];
+          } else {
+            FunctionBBs[I] = BasicBlock::Create(Context, "", F);
+          }
+
+        // Erase from the table.
+        BasicBlockFwdRefs.erase(BBFRI);
+      }
+
+      CurBB = FunctionBBs[0];
+      continue;
+    }
+
+    case bitc::FUNC_CODE_DEBUG_LOC_AGAIN:  // DEBUG_LOC_AGAIN
+      // This record indicates that the last instruction is at the same
+      // location as the previous instruction with a location.
+      I = getLastInstruction();
+
+      if (!I)
+        return error("Invalid record");
+      I->setDebugLoc(LastLoc);
+      I = nullptr;
+      continue;
+
+    case bitc::FUNC_CODE_DEBUG_LOC: {      // DEBUG_LOC: [line, col, scope, ia]
+      I = getLastInstruction();
+      if (!I || Record.size() < 4)
+        return error("Invalid record");
+
+      unsigned Line = Record[0], Col = Record[1];
+      unsigned ScopeID = Record[2], IAID = Record[3];
+
+      MDNode *Scope = nullptr, *IA = nullptr;
+      if (ScopeID) {
+        Scope = MDLoader->getMDNodeFwdRefOrNull(ScopeID - 1);
+        if (!Scope)
+          return error("Invalid record");
+      }
+      if (IAID) {
+        IA = MDLoader->getMDNodeFwdRefOrNull(IAID - 1);
+        if (!IA)
+          return error("Invalid record");
+      }
+      LastLoc = DebugLoc::get(Line, Col, Scope, IA);
+      I->setDebugLoc(LastLoc);
+      I = nullptr;
+      continue;
+    }
+
+    case bitc::FUNC_CODE_INST_BINOP: {    // BINOP: [opval, ty, opval, opcode]
+      unsigned OpNum = 0;
+      Value *LHS, *RHS;
+      if (getValueTypePair(Record, OpNum, NextValueNo, LHS) ||
+          popValue(Record, OpNum, NextValueNo, LHS->getType(), RHS) ||
+          OpNum+1 > Record.size())
+        return error("Invalid record");
+
+      int Opc = getDecodedBinaryOpcode(Record[OpNum++], LHS->getType());
+      if (Opc == -1)
+        return error("Invalid record");
+      I = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS);
+      InstructionList.push_back(I);
+      if (OpNum < Record.size()) {
+        if (Opc == Instruction::Add ||
+            Opc == Instruction::Sub ||
+            Opc == Instruction::Mul ||
+            Opc == Instruction::Shl) {
+          if (Record[OpNum] & (1 << bitc::OBO_NO_SIGNED_WRAP))
+            cast<BinaryOperator>(I)->setHasNoSignedWrap(true);
+          if (Record[OpNum] & (1 << bitc::OBO_NO_UNSIGNED_WRAP))
+            cast<BinaryOperator>(I)->setHasNoUnsignedWrap(true);
+        } else if (Opc == Instruction::SDiv ||
+                   Opc == Instruction::UDiv ||
+                   Opc == Instruction::LShr ||
+                   Opc == Instruction::AShr) {
+          if (Record[OpNum] & (1 << bitc::PEO_EXACT))
+            cast<BinaryOperator>(I)->setIsExact(true);
+        } else if (isa<FPMathOperator>(I)) {
+          FastMathFlags FMF = getDecodedFastMathFlags(Record[OpNum]);
+          if (FMF.any())
+            I->setFastMathFlags(FMF);
+        }
+
+      }
+      break;
+    }
+    case bitc::FUNC_CODE_INST_CAST: {    // CAST: [opval, opty, destty, castopc]
+      unsigned OpNum = 0;
+      Value *Op;
+      if (getValueTypePair(Record, OpNum, NextValueNo, Op) ||
+          OpNum+2 != Record.size())
+        return error("Invalid record");
+
+      Type *ResTy = getTypeByID(Record[OpNum]);
+      int Opc = getDecodedCastOpcode(Record[OpNum + 1]);
+      if (Opc == -1 || !ResTy)
+        return error("Invalid record");
+      Instruction *Temp = nullptr;
+      if ((I = UpgradeBitCastInst(Opc, Op, ResTy, Temp))) {
+        if (Temp) {
+          InstructionList.push_back(Temp);
+          CurBB->getInstList().push_back(Temp);
+        }
+      } else {
+        auto CastOp = (Instruction::CastOps)Opc;
+        if (!CastInst::castIsValid(CastOp, Op, ResTy))
+          return error("Invalid cast");
+        I = CastInst::Create(CastOp, Op, ResTy);
+      }
+      InstructionList.push_back(I);
+      break;
+    }
+    case bitc::FUNC_CODE_INST_INBOUNDS_GEP_OLD:
+    case bitc::FUNC_CODE_INST_GEP_OLD:
+    case bitc::FUNC_CODE_INST_GEP: { // GEP: type, [n x operands]
+      unsigned OpNum = 0;
+
+      Type *Ty;
+      bool InBounds;
+
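+      // The current GEP record carries an explicit inbounds flag and source
+      // element type; the older record codes encode inbounds in the code
+      // itself and derive the type from the base pointer below.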
+      if (BitCode == bitc::FUNC_CODE_INST_GEP) {
+        InBounds = Record[OpNum++];
+        Ty = getTypeByID(Record[OpNum++]);
+      } else {
+        InBounds = BitCode == bitc::FUNC_CODE_INST_INBOUNDS_GEP_OLD;
+        Ty = nullptr;
+      }
+
+      Value *BasePtr;
+      if (getValueTypePair(Record, OpNum, NextValueNo, BasePtr))
+        return error("Invalid record");
+
+      if (!Ty)
+        Ty = cast<PointerType>(BasePtr->getType()->getScalarType())
+                 ->getElementType();
+      else if (Ty !=
+               cast<PointerType>(BasePtr->getType()->getScalarType())
+                   ->getElementType())
+        return error(
+            "Explicit gep type does not match pointee type of pointer operand");
+
+      SmallVector<Value*, 16> GEPIdx;
+      while (OpNum != Record.size()) {
+        Value *Op;
+        if (getValueTypePair(Record, OpNum, NextValueNo, Op))
+          return error("Invalid record");
+        GEPIdx.push_back(Op);
+      }
+
+      I = GetElementPtrInst::Create(Ty, BasePtr, GEPIdx);
+
+      InstructionList.push_back(I);
+      if (InBounds)
+        cast<GetElementPtrInst>(I)->setIsInBounds(true);
+      break;
+    }
+
+    case bitc::FUNC_CODE_INST_EXTRACTVAL: {
+                                       // EXTRACTVAL: [opty, opval, n x indices]
+      unsigned OpNum = 0;
+      Value *Agg;
+      if (getValueTypePair(Record, OpNum, NextValueNo, Agg))
+        return error("Invalid record");
+
+      unsigned RecSize = Record.size();
+      if (OpNum == RecSize)
+        return error("EXTRACTVAL: Invalid instruction with 0 indices");
+
+      SmallVector<unsigned, 4> EXTRACTVALIdx;
+      Type *CurTy = Agg->getType();
+      for (; OpNum != RecSize; ++OpNum) {
+        bool IsArray = CurTy->isArrayTy();
+        bool IsStruct = CurTy->isStructTy();
+        uint64_t Index = Record[OpNum];
+
+        if (!IsStruct && !IsArray)
+          return error("EXTRACTVAL: Invalid type");
+        if ((unsigned)Index != Index)
+          return error("Invalid value");
+        if (IsStruct && Index >= CurTy->subtypes().size())
+          return error("EXTRACTVAL: Invalid struct index");
+        if (IsArray && Index >= CurTy->getArrayNumElements())
+          return error("EXTRACTVAL: Invalid array index");
+        EXTRACTVALIdx.push_back((unsigned)Index);
+
+        if (IsStruct)
+          CurTy = CurTy->subtypes()[Index];
+        else
+          CurTy = CurTy->subtypes()[0];
+      }
+
+      I = ExtractValueInst::Create(Agg, EXTRACTVALIdx);
+      InstructionList.push_back(I);
+      break;
+    }
+
+    case bitc::FUNC_CODE_INST_INSERTVAL: {
+                           // INSERTVAL: [opty, opval, opty, opval, n x indices]
+      unsigned OpNum = 0;
+      Value *Agg;
+      if (getValueTypePair(Record, OpNum, NextValueNo, Agg))
+        return error("Invalid record");
+      Value *Val;
+      if (getValueTypePair(Record, OpNum, NextValueNo, Val))
+        return error("Invalid record");
+
+      unsigned RecSize = Record.size();
+      if (OpNum == RecSize)
+        return error("INSERTVAL: Invalid instruction with 0 indices");
+
+      SmallVector<unsigned, 4> INSERTVALIdx;
+      Type *CurTy = Agg->getType();
+      for (; OpNum != RecSize; ++OpNum) {
+        bool IsArray = CurTy->isArrayTy();
+        bool IsStruct = CurTy->isStructTy();
+        uint64_t Index = Record[OpNum];
+
+        if (!IsStruct && !IsArray)
+          return error("INSERTVAL: Invalid type");
+        if ((unsigned)Index != Index)
+          return error("Invalid value");
+        if (IsStruct && Index >= CurTy->subtypes().size())
+          return error("INSERTVAL: Invalid struct index");
+        if (IsArray && Index >= CurTy->getArrayNumElements())
+          return error("INSERTVAL: Invalid array index");
+
+        INSERTVALIdx.push_back((unsigned)Index);
+        if (IsStruct)
+          CurTy = CurTy->subtypes()[Index];
+        else
+          CurTy = CurTy->subtypes()[0];
+      }
+
+      if (CurTy != Val->getType())
+        return error("Inserted value type doesn't match aggregate type");
+
+      I = InsertValueInst::Create(Agg, Val, INSERTVALIdx);
+      InstructionList.push_back(I);
+      break;
+    }
+
+    case bitc::FUNC_CODE_INST_SELECT: { // SELECT: [opval, ty, opval, opval]
+      // obsolete form of select
+      // handles select i1 ... in old bitcode
+      unsigned OpNum = 0;
+      Value *TrueVal, *FalseVal, *Cond;
+      if (getValueTypePair(Record, OpNum, NextValueNo, TrueVal) ||
+          popValue(Record, OpNum, NextValueNo, TrueVal->getType(), FalseVal) ||
+          popValue(Record, OpNum, NextValueNo, Type::getInt1Ty(Context), Cond))
+        return error("Invalid record");
+
+      I = SelectInst::Create(Cond, TrueVal, FalseVal);
+      InstructionList.push_back(I);
+      break;
+    }
+
+    case bitc::FUNC_CODE_INST_VSELECT: {// VSELECT: [ty,opval,opval,predty,pred]
+      // new form of select
+      // handles select i1 or select [N x i1]
+      unsigned OpNum = 0;
+      Value *TrueVal, *FalseVal, *Cond;
+      if (getValueTypePair(Record, OpNum, NextValueNo, TrueVal) ||
+          popValue(Record, OpNum, NextValueNo, TrueVal->getType(), FalseVal) ||
+          getValueTypePair(Record, OpNum, NextValueNo, Cond))
+        return error("Invalid record");
+
+      // select condition can be either i1 or [N x i1]
+      if (VectorType* vector_type =
+          dyn_cast<VectorType>(Cond->getType())) {
+        // expect <n x i1>
+        if (vector_type->getElementType() != Type::getInt1Ty(Context))
+          return error("Invalid type for value");
+      } else {
+        // expect i1
+        if (Cond->getType() != Type::getInt1Ty(Context))
+          return error("Invalid type for value");
+      }
+
+      I = SelectInst::Create(Cond, TrueVal, FalseVal);
+      InstructionList.push_back(I);
+      break;
+    }
+
+    case bitc::FUNC_CODE_INST_EXTRACTELT: { // EXTRACTELT: [opty, opval, opval]
+      unsigned OpNum = 0;
+      Value *Vec, *Idx;
+      if (getValueTypePair(Record, OpNum, NextValueNo, Vec) ||
+          getValueTypePair(Record, OpNum, NextValueNo, Idx))
+        return error("Invalid record");
+      if (!Vec->getType()->isVectorTy())
+        return error("Invalid type for value");
+      I = ExtractElementInst::Create(Vec, Idx);
+      InstructionList.push_back(I);
+      break;
+    }
+
+    case bitc::FUNC_CODE_INST_INSERTELT: { // INSERTELT: [ty, opval,opval,opval]
+      unsigned OpNum = 0;
+      Value *Vec, *Elt, *Idx;
+      if (getValueTypePair(Record, OpNum, NextValueNo, Vec))
+        return error("Invalid record");
+      if (!Vec->getType()->isVectorTy())
+        return error("Invalid type for value");
+      if (popValue(Record, OpNum, NextValueNo,
+                   cast<VectorType>(Vec->getType())->getElementType(), Elt) ||
+          getValueTypePair(Record, OpNum, NextValueNo, Idx))
+        return error("Invalid record");
+      I = InsertElementInst::Create(Vec, Elt, Idx);
+      InstructionList.push_back(I);
+      break;
+    }
+
+    case bitc::FUNC_CODE_INST_SHUFFLEVEC: {// SHUFFLEVEC: [opval,ty,opval,opval]
+      unsigned OpNum = 0;
+      Value *Vec1, *Vec2, *Mask;
+      if (getValueTypePair(Record, OpNum, NextValueNo, Vec1) ||
+          popValue(Record, OpNum, NextValueNo, Vec1->getType(), Vec2))
+        return error("Invalid record");
+
+      if (getValueTypePair(Record, OpNum, NextValueNo, Mask))
+        return error("Invalid record");
+      if (!Vec1->getType()->isVectorTy() || !Vec2->getType()->isVectorTy())
+        return error("Invalid type for value");
+      I = new ShuffleVectorInst(Vec1, Vec2, Mask);
+      InstructionList.push_back(I);
+      break;
+    }
+
+    case bitc::FUNC_CODE_INST_CMP:   // CMP: [opty, opval, opval, pred]
+      // Old form of ICmp/FCmp returning bool
+      // Existed to differentiate between icmp/fcmp and vicmp/vfcmp which were
+      // both legal on vectors but had different behaviour.
+    case bitc::FUNC_CODE_INST_CMP2: { // CMP2: [opty, opval, opval, pred]
+      // FCmp/ICmp returning bool or vector of bool
+
+      unsigned OpNum = 0;
+      Value *LHS, *RHS;
+      if (getValueTypePair(Record, OpNum, NextValueNo, LHS) ||
+          popValue(Record, OpNum, NextValueNo, LHS->getType(), RHS))
+        return error("Invalid record");
+
+      unsigned PredVal = Record[OpNum];
+      bool IsFP = LHS->getType()->isFPOrFPVectorTy();
+      FastMathFlags FMF;
+      if (IsFP && Record.size() > OpNum+1)
+        FMF = getDecodedFastMathFlags(Record[++OpNum]);
+
+      if (OpNum+1 != Record.size())
+        return error("Invalid record");
+
+      if (LHS->getType()->isFPOrFPVectorTy())
+        I = new FCmpInst((FCmpInst::Predicate)PredVal, LHS, RHS);
+      else
+        I = new ICmpInst((ICmpInst::Predicate)PredVal, LHS, RHS);
+
+      if (FMF.any())
+        I->setFastMathFlags(FMF);
+      InstructionList.push_back(I);
+      break;
+    }
+
+    case bitc::FUNC_CODE_INST_RET: // RET: [opty,opval<optional>]
+      {
+        unsigned Size = Record.size();
+        if (Size == 0) {
+          I = ReturnInst::Create(Context);
+          InstructionList.push_back(I);
+          break;
+        }
+
+        unsigned OpNum = 0;
+        Value *Op = nullptr;
+        if (getValueTypePair(Record, OpNum, NextValueNo, Op))
+          return error("Invalid record");
+        if (OpNum != Record.size())
+          return error("Invalid record");
+
+        I = ReturnInst::Create(Context, Op);
+        InstructionList.push_back(I);
+        break;
+      }
+    case bitc::FUNC_CODE_INST_BR: { // BR: [bb#, bb#, opval] or [bb#]
+      if (Record.size() != 1 && Record.size() != 3)
+        return error("Invalid record");
+      BasicBlock *TrueDest = getBasicBlock(Record[0]);
+      if (!TrueDest)
+        return error("Invalid record");
+
+      if (Record.size() == 1) {
+        I = BranchInst::Create(TrueDest);
+        InstructionList.push_back(I);
+      }
+      else {
+        BasicBlock *FalseDest = getBasicBlock(Record[1]);
+        Value *Cond = getValue(Record, 2, NextValueNo,
+                               Type::getInt1Ty(Context));
+        if (!FalseDest || !Cond)
+          return error("Invalid record");
+        I = BranchInst::Create(TrueDest, FalseDest, Cond);
+        InstructionList.push_back(I);
+      }
+      break;
+    }
+    case bitc::FUNC_CODE_INST_CLEANUPRET: { // CLEANUPRET: [val] or [val,bb#]
+      if (Record.size() != 1 && Record.size() != 2)
+        return error("Invalid record");
+      unsigned Idx = 0;
+      Value *CleanupPad =
+          getValue(Record, Idx++, NextValueNo, Type::getTokenTy(Context));
+      if (!CleanupPad)
+        return error("Invalid record");
+      BasicBlock *UnwindDest = nullptr;
+      if (Record.size() == 2) {
+        UnwindDest = getBasicBlock(Record[Idx++]);
+        if (!UnwindDest)
+          return error("Invalid record");
+      }
+
+      I = CleanupReturnInst::Create(CleanupPad, UnwindDest);
+      InstructionList.push_back(I);
+      break;
+    }
+    case bitc::FUNC_CODE_INST_CATCHRET: { // CATCHRET: [val,bb#]
+      if (Record.size() != 2)
+        return error("Invalid record");
+      unsigned Idx = 0;
+      Value *CatchPad =
+          getValue(Record, Idx++, NextValueNo, Type::getTokenTy(Context));
+      if (!CatchPad)
+        return error("Invalid record");
+      BasicBlock *BB = getBasicBlock(Record[Idx++]);
+      if (!BB)
+        return error("Invalid record");
+
+      I = CatchReturnInst::Create(CatchPad, BB);
+      InstructionList.push_back(I);
+      break;
+    }
+    case bitc::FUNC_CODE_INST_CATCHSWITCH: { // CATCHSWITCH: [tok,num,(bb)*,bb?]
+      // We must have, at minimum, the outer scope and the number of arguments.
+      if (Record.size() < 2)
+        return error("Invalid record");
+
+      unsigned Idx = 0;
+
+      Value *ParentPad =
+          getValue(Record, Idx++, NextValueNo, Type::getTokenTy(Context));
+
+      unsigned NumHandlers = Record[Idx++];
+
+      SmallVector<BasicBlock *, 2> Handlers;
+      for (unsigned Op = 0; Op != NumHandlers; ++Op) {
+        BasicBlock *BB = getBasicBlock(Record[Idx++]);
+        if (!BB)
+          return error("Invalid record");
+        Handlers.push_back(BB);
+      }
+
+      BasicBlock *UnwindDest = nullptr;
+      if (Idx + 1 == Record.size()) {
+        UnwindDest = getBasicBlock(Record[Idx++]);
+        if (!UnwindDest)
+          return error("Invalid record");
+      }
+
+      if (Record.size() != Idx)
+        return error("Invalid record");
+
+      auto *CatchSwitch =
+          CatchSwitchInst::Create(ParentPad, UnwindDest, NumHandlers);
+      for (BasicBlock *Handler : Handlers)
+        CatchSwitch->addHandler(Handler);
+      I = CatchSwitch;
+      InstructionList.push_back(I);
+      break;
+    }
+    case bitc::FUNC_CODE_INST_CATCHPAD:
+    case bitc::FUNC_CODE_INST_CLEANUPPAD: { // [tok,num,(ty,val)*]
+      // We must have, at minimum, the outer scope and the number of arguments.
+      if (Record.size() < 2)
+        return error("Invalid record");
+
+      unsigned Idx = 0;
+
+      Value *ParentPad =
+          getValue(Record, Idx++, NextValueNo, Type::getTokenTy(Context));
+
+      unsigned NumArgOperands = Record[Idx++];
+
+      SmallVector<Value *, 2> Args;
+      for (unsigned Op = 0; Op != NumArgOperands; ++Op) {
+        Value *Val;
+        if (getValueTypePair(Record, Idx, NextValueNo, Val))
+          return error("Invalid record");
+        Args.push_back(Val);
+      }
+
+      if (Record.size() != Idx)
+        return error("Invalid record");
+
+      if (BitCode == bitc::FUNC_CODE_INST_CLEANUPPAD)
+        I = CleanupPadInst::Create(ParentPad, Args);
+      else
+        I = CatchPadInst::Create(ParentPad, Args);
+      InstructionList.push_back(I);
+      break;
+    }
+    case bitc::FUNC_CODE_INST_SWITCH: { // SWITCH: [opty, op0, op1, ...]
+      // Check magic
+      if ((Record[0] >> 16) == SWITCH_INST_MAGIC) {
+        // "New" SwitchInst format with case ranges. The changes to write this
+        // format were reverted but we still recognize bitcode that uses it.
+        // Hopefully someday we will have support for case ranges and can use
+        // this format again.
+
+        Type *OpTy = getTypeByID(Record[1]);
+        unsigned ValueBitWidth = cast<IntegerType>(OpTy)->getBitWidth();
+
+        Value *Cond = getValue(Record, 2, NextValueNo, OpTy);
+        BasicBlock *Default = getBasicBlock(Record[3]);
+        if (!OpTy || !Cond || !Default)
+          return error("Invalid record");
+
+        unsigned NumCases = Record[4];
+
+        SwitchInst *SI = SwitchInst::Create(Cond, Default, NumCases);
+        InstructionList.push_back(SI);
+
+        unsigned CurIdx = 5;
+        for (unsigned i = 0; i != NumCases; ++i) {
+          SmallVector<ConstantInt*, 1> CaseVals;
+          unsigned NumItems = Record[CurIdx++];
+          for (unsigned ci = 0; ci != NumItems; ++ci) {
+            bool isSingleNumber = Record[CurIdx++];
+
+            APInt Low;
+            unsigned ActiveWords = 1;
+            if (ValueBitWidth > 64)
+              ActiveWords = Record[CurIdx++];
+            Low = readWideAPInt(makeArrayRef(&Record[CurIdx], ActiveWords),
+                                ValueBitWidth);
+            CurIdx += ActiveWords;
+
+            if (!isSingleNumber) {
+              ActiveWords = 1;
+              if (ValueBitWidth > 64)
+                ActiveWords = Record[CurIdx++];
+              APInt High = readWideAPInt(
+                  makeArrayRef(&Record[CurIdx], ActiveWords), ValueBitWidth);
+              CurIdx += ActiveWords;
+
+              // FIXME: It is not clear whether values in the range should be
+              // compared as signed or unsigned values. The partially
+              // implemented changes that used this format in the past used
+              // unsigned comparisons.
+              for ( ; Low.ule(High); ++Low)
+                CaseVals.push_back(ConstantInt::get(Context, Low));
+            } else
+              CaseVals.push_back(ConstantInt::get(Context, Low));
+          }
+          BasicBlock *DestBB = getBasicBlock(Record[CurIdx++]);
+          for (SmallVector<ConstantInt*, 1>::iterator cvi = CaseVals.begin(),
+                 cve = CaseVals.end(); cvi != cve; ++cvi)
+            SI->addCase(*cvi, DestBB);
+        }
+        I = SI;
+        break;
+      }
+
+      // Old SwitchInst format without case ranges.
+
+      if (Record.size() < 3 || (Record.size() & 1) == 0)
+        return error("Invalid record");
+      Type *OpTy = getTypeByID(Record[0]);
+      Value *Cond = getValue(Record, 1, NextValueNo, OpTy);
+      BasicBlock *Default = getBasicBlock(Record[2]);
+      if (!OpTy || !Cond || !Default)
+        return error("Invalid record");
+      unsigned NumCases = (Record.size()-3)/2;
+      SwitchInst *SI = SwitchInst::Create(Cond, Default, NumCases);
+      InstructionList.push_back(SI);
+      for (unsigned i = 0, e = NumCases; i != e; ++i) {
+        ConstantInt *CaseVal =
+          dyn_cast_or_null<ConstantInt>(getFnValueByID(Record[3+i*2], OpTy));
+        BasicBlock *DestBB = getBasicBlock(Record[1+3+i*2]);
+        if (!CaseVal || !DestBB) {
+          delete SI;
+          return error("Invalid record");
+        }
+        SI->addCase(CaseVal, DestBB);
+      }
+      I = SI;
+      break;
+    }
+    case bitc::FUNC_CODE_INST_INDIRECTBR: { // INDIRECTBR: [opty, op0, op1, ...]
+      if (Record.size() < 2)
+        return error("Invalid record");
+      Type *OpTy = getTypeByID(Record[0]);
+      Value *Address = getValue(Record, 1, NextValueNo, OpTy);
+      if (!OpTy || !Address)
+        return error("Invalid record");
+      unsigned NumDests = Record.size()-2;
+      IndirectBrInst *IBI = IndirectBrInst::Create(Address, NumDests);
+      InstructionList.push_back(IBI);
+      for (unsigned i = 0, e = NumDests; i != e; ++i) {
+        if (BasicBlock *DestBB = getBasicBlock(Record[2+i])) {
+          IBI->addDestination(DestBB);
+        } else {
+          delete IBI;
+          return error("Invalid record");
+        }
+      }
+      I = IBI;
+      break;
+    }
+
+    case bitc::FUNC_CODE_INST_INVOKE: {
+      // INVOKE: [attrs, cc, normBB, unwindBB, fnty, op0,op1,op2, ...]
+      if (Record.size() < 4)
+        return error("Invalid record");
+      unsigned OpNum = 0;
+      AttributeSet PAL = getAttributes(Record[OpNum++]);
+      unsigned CCInfo = Record[OpNum++];
+      BasicBlock *NormalBB = getBasicBlock(Record[OpNum++]);
+      BasicBlock *UnwindBB = getBasicBlock(Record[OpNum++]);
+
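+      // Bit 13 of CCInfo signals an explicit function type operand;
+      // otherwise the type is derived from the callee's pointer type below.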
+      FunctionType *FTy = nullptr;
+      if (CCInfo >> 13 & 1 &&
+          !(FTy = dyn_cast<FunctionType>(getTypeByID(Record[OpNum++]))))
+        return error("Explicit invoke type is not a function type");
+
+      Value *Callee;
+      if (getValueTypePair(Record, OpNum, NextValueNo, Callee))
+        return error("Invalid record");
+
+      PointerType *CalleeTy = dyn_cast<PointerType>(Callee->getType());
+      if (!CalleeTy)
+        return error("Callee is not a pointer");
+      if (!FTy) {
+        FTy = dyn_cast<FunctionType>(CalleeTy->getElementType());
+        if (!FTy)
+          return error("Callee is not of pointer to function type");
+      } else if (CalleeTy->getElementType() != FTy)
+        return error("Explicit invoke type does not match pointee type of "
+                     "callee operand");
+      if (Record.size() < FTy->getNumParams() + OpNum)
+        return error("Insufficient operands to call");
+
+      SmallVector<Value*, 16> Ops;
+      for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i, ++OpNum) {
+        Ops.push_back(getValue(Record, OpNum, NextValueNo,
+                               FTy->getParamType(i)));
+        if (!Ops.back())
+          return error("Invalid record");
+      }
+
+      if (!FTy->isVarArg()) {
+        if (Record.size() != OpNum)
+          return error("Invalid record");
+      } else {
+        // Read type/value pairs for varargs params.
+        while (OpNum != Record.size()) {
+          Value *Op;
+          if (getValueTypePair(Record, OpNum, NextValueNo, Op))
+            return error("Invalid record");
+          Ops.push_back(Op);
+        }
+      }
+
+      I = InvokeInst::Create(Callee, NormalBB, UnwindBB, Ops, OperandBundles);
+      OperandBundles.clear();
+      InstructionList.push_back(I);
+      cast<InvokeInst>(I)->setCallingConv(
+          static_cast<CallingConv::ID>(CallingConv::MaxID & CCInfo));
+      cast<InvokeInst>(I)->setAttributes(PAL);
+      break;
+    }
+    case bitc::FUNC_CODE_INST_RESUME: { // RESUME: [opval]
+      unsigned Idx = 0;
+      Value *Val = nullptr;
+      if (getValueTypePair(Record, Idx, NextValueNo, Val))
+        return error("Invalid record");
+      I = ResumeInst::Create(Val);
+      InstructionList.push_back(I);
+      break;
+    }
+    case bitc::FUNC_CODE_INST_UNREACHABLE: // UNREACHABLE
+      I = new UnreachableInst(Context);
+      InstructionList.push_back(I);
+      break;
+    case bitc::FUNC_CODE_INST_PHI: { // PHI: [ty, val0,bb0, ...]
+      if (Record.size() < 1 || ((Record.size()-1)&1))
+        return error("Invalid record");
+      Type *Ty = getTypeByID(Record[0]);
+      if (!Ty)
+        return error("Invalid record");
+
+      PHINode *PN = PHINode::Create(Ty, (Record.size()-1)/2);
+      InstructionList.push_back(PN);
+
+      for (unsigned i = 0, e = Record.size()-1; i != e; i += 2) {
+        Value *V;
+        // With the new function encoding, it is possible that operands have
+        // negative IDs (for forward references).  Use a signed VBR
+        // representation to keep the encoding small.
+        if (UseRelativeIDs)
+          V = getValueSigned(Record, 1+i, NextValueNo, Ty);
+        else
+          V = getValue(Record, 1+i, NextValueNo, Ty);
+        BasicBlock *BB = getBasicBlock(Record[2+i]);
+        if (!V || !BB)
+          return error("Invalid record");
+        PN->addIncoming(V, BB);
+      }
+      I = PN;
+      break;
+    }
+
+    case bitc::FUNC_CODE_INST_LANDINGPAD:
+    case bitc::FUNC_CODE_INST_LANDINGPAD_OLD: {
+      // LANDINGPAD: [ty, val, val, num, (id0,val0 ...)?]
+      unsigned Idx = 0;
+      if (BitCode == bitc::FUNC_CODE_INST_LANDINGPAD) {
+        if (Record.size() < 3)
+          return error("Invalid record");
+      } else {
+        assert(BitCode == bitc::FUNC_CODE_INST_LANDINGPAD_OLD);
+        if (Record.size() < 4)
+          return error("Invalid record");
+      }
+      Type *Ty = getTypeByID(Record[Idx++]);
+      if (!Ty)
+        return error("Invalid record");
+      if (BitCode == bitc::FUNC_CODE_INST_LANDINGPAD_OLD) {
+        Value *PersFn = nullptr;
+        if (getValueTypePair(Record, Idx, NextValueNo, PersFn))
+          return error("Invalid record");
+
+        if (!F->hasPersonalityFn())
+          F->setPersonalityFn(cast<Constant>(PersFn));
+        else if (F->getPersonalityFn() != cast<Constant>(PersFn))
+          return error("Personality function mismatch");
+      }
+
+      bool IsCleanup = !!Record[Idx++];
+      unsigned NumClauses = Record[Idx++];
+      LandingPadInst *LP = LandingPadInst::Create(Ty, NumClauses);
+      LP->setCleanup(IsCleanup);
+      for (unsigned J = 0; J != NumClauses; ++J) {
+        LandingPadInst::ClauseType CT =
+          LandingPadInst::ClauseType(Record[Idx++]); (void)CT;
+        Value *Val;
+
+        if (getValueTypePair(Record, Idx, NextValueNo, Val)) {
+          delete LP;
+          return error("Invalid record");
+        }
+
+        assert((CT != LandingPadInst::Catch ||
+                !isa<ArrayType>(Val->getType())) &&
+               "Catch clause has an invalid type!");
+        assert((CT != LandingPadInst::Filter ||
+                isa<ArrayType>(Val->getType())) &&
+               "Filter clause has an invalid type!");
+        LP->addClause(cast<Constant>(Val));
+      }
+
+      I = LP;
+      InstructionList.push_back(I);
+      break;
+    }
+
+    case bitc::FUNC_CODE_INST_ALLOCA: { // ALLOCA: [instty, opty, op, align]
+      if (Record.size() != 4)
+        return error("Invalid record");
+      uint64_t AlignRecord = Record[3];
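+      // The align operand packs three flag bits above the encoded alignment;
+      // the masks below separate the flags from the alignment value.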
+      const uint64_t InAllocaMask = uint64_t(1) << 5;
+      const uint64_t ExplicitTypeMask = uint64_t(1) << 6;
+      const uint64_t SwiftErrorMask = uint64_t(1) << 7;
+      const uint64_t FlagMask = InAllocaMask | ExplicitTypeMask |
+                                SwiftErrorMask;
+      bool InAlloca = AlignRecord & InAllocaMask;
+      bool SwiftError = AlignRecord & SwiftErrorMask;
+      Type *Ty = getTypeByID(Record[0]);
+      if ((AlignRecord & ExplicitTypeMask) == 0) {
+        auto *PTy = dyn_cast_or_null<PointerType>(Ty);
+        if (!PTy)
+          return error("Old-style alloca with a non-pointer type");
+        Ty = PTy->getElementType();
+      }
+      Type *OpTy = getTypeByID(Record[1]);
+      Value *Size = getFnValueByID(Record[2], OpTy);
+      unsigned Align;
+      if (Error Err = parseAlignmentValue(AlignRecord & ~FlagMask, Align)) {
+        return Err;
+      }
+      if (!Ty || !Size)
+        return error("Invalid record");
+      AllocaInst *AI = new AllocaInst(Ty, Size, Align);
+      AI->setUsedWithInAlloca(InAlloca);
+      AI->setSwiftError(SwiftError);
+      I = AI;
+      InstructionList.push_back(I);
+      break;
+    }
+    case bitc::FUNC_CODE_INST_LOAD: { // LOAD: [opty, op, align, vol]
+      unsigned OpNum = 0;
+      Value *Op;
+      if (getValueTypePair(Record, OpNum, NextValueNo, Op) ||
+          (OpNum + 2 != Record.size() && OpNum + 3 != Record.size()))
+        return error("Invalid record");
+
+      Type *Ty = nullptr;
+      if (OpNum + 3 == Record.size())
+        Ty = getTypeByID(Record[OpNum++]);
+      if (Error Err = typeCheckLoadStoreInst(Ty, Op->getType()))
+        return Err;
+      if (!Ty)
+        Ty = cast<PointerType>(Op->getType())->getElementType();
+
+      unsigned Align;
+      if (Error Err = parseAlignmentValue(Record[OpNum], Align))
+        return Err;
+      I = new LoadInst(Ty, Op, "", Record[OpNum + 1], Align);
+
+      InstructionList.push_back(I);
+      break;
+    }
+    case bitc::FUNC_CODE_INST_LOADATOMIC: {
+       // LOADATOMIC: [opty, op, align, vol, ordering, synchscope]
+      unsigned OpNum = 0;
+      Value *Op;
+      if (getValueTypePair(Record, OpNum, NextValueNo, Op) ||
+          (OpNum + 4 != Record.size() && OpNum + 5 != Record.size()))
+        return error("Invalid record");
+
+      Type *Ty = nullptr;
+      if (OpNum + 5 == Record.size())
+        Ty = getTypeByID(Record[OpNum++]);
+      if (Error Err = typeCheckLoadStoreInst(Ty, Op->getType()))
+        return Err;
+      if (!Ty)
+        Ty = cast<PointerType>(Op->getType())->getElementType();
+
+      AtomicOrdering Ordering = getDecodedOrdering(Record[OpNum + 2]);
+      if (Ordering == AtomicOrdering::NotAtomic ||
+          Ordering == AtomicOrdering::Release ||
+          Ordering == AtomicOrdering::AcquireRelease)
+        return error("Invalid record");
+      if (Ordering != AtomicOrdering::NotAtomic && Record[OpNum] == 0)
+        return error("Invalid record");
+      SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 3]);
+
+      unsigned Align;
+      if (Error Err = parseAlignmentValue(Record[OpNum], Align))
+        return Err;
+      I = new LoadInst(Op, "", Record[OpNum+1], Align, Ordering, SynchScope);
+
+      InstructionList.push_back(I);
+      break;
+    }
+    case bitc::FUNC_CODE_INST_STORE:
+    case bitc::FUNC_CODE_INST_STORE_OLD: { // STORE2:[ptrty, ptr, val, align, vol]
+      unsigned OpNum = 0;
+      Value *Val, *Ptr;
+      if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) ||
+          (BitCode == bitc::FUNC_CODE_INST_STORE
+               ? getValueTypePair(Record, OpNum, NextValueNo, Val)
+               : popValue(Record, OpNum, NextValueNo,
+                          cast<PointerType>(Ptr->getType())->getElementType(),
+                          Val)) ||
+          OpNum + 2 != Record.size())
+        return error("Invalid record");
+
+      if (Error Err = typeCheckLoadStoreInst(Val->getType(), Ptr->getType()))
+        return Err;
+      unsigned Align;
+      if (Error Err = parseAlignmentValue(Record[OpNum], Align))
+        return Err;
+      I = new StoreInst(Val, Ptr, Record[OpNum+1], Align);
+      InstructionList.push_back(I);
+      break;
+    }
+    case bitc::FUNC_CODE_INST_STOREATOMIC:
+    case bitc::FUNC_CODE_INST_STOREATOMIC_OLD: {
+      // STOREATOMIC: [ptrty, ptr, val, align, vol, ordering, synchscope]
+      unsigned OpNum = 0;
+      Value *Val, *Ptr;
+      if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) ||
+          !isa<PointerType>(Ptr->getType()) ||
+          (BitCode == bitc::FUNC_CODE_INST_STOREATOMIC
+               ? getValueTypePair(Record, OpNum, NextValueNo, Val)
+               : popValue(Record, OpNum, NextValueNo,
+                          cast<PointerType>(Ptr->getType())->getElementType(),
+                          Val)) ||
+          OpNum + 4 != Record.size())
+        return error("Invalid record");
+
+      if (Error Err = typeCheckLoadStoreInst(Val->getType(), Ptr->getType()))
+        return Err;
+      AtomicOrdering Ordering = getDecodedOrdering(Record[OpNum + 2]);
+      if (Ordering == AtomicOrdering::NotAtomic ||
+          Ordering == AtomicOrdering::Acquire ||
+          Ordering == AtomicOrdering::AcquireRelease)
+        return error("Invalid record");
+      SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 3]);
+      if (Ordering != AtomicOrdering::NotAtomic && Record[OpNum] == 0)
+        return error("Invalid record");
+
+      unsigned Align;
+      if (Error Err = parseAlignmentValue(Record[OpNum], Align))
+        return Err;
+      I = new StoreInst(Val, Ptr, Record[OpNum+1], Align, Ordering, SynchScope);
+      InstructionList.push_back(I);
+      break;
+    }
+    case bitc::FUNC_CODE_INST_CMPXCHG_OLD:
+    case bitc::FUNC_CODE_INST_CMPXCHG: {
+      // CMPXCHG:[ptrty, ptr, cmp, new, vol, successordering, synchscope,
+      //          failureordering?, isweak?]
+      unsigned OpNum = 0;
+      Value *Ptr, *Cmp, *New;
+      if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) ||
+          (BitCode == bitc::FUNC_CODE_INST_CMPXCHG
+               ? getValueTypePair(Record, OpNum, NextValueNo, Cmp)
+               : popValue(Record, OpNum, NextValueNo,
+                          cast<PointerType>(Ptr->getType())->getElementType(),
+                          Cmp)) ||
+          popValue(Record, OpNum, NextValueNo, Cmp->getType(), New) ||
+          Record.size() < OpNum + 3 || Record.size() > OpNum + 5)
+        return error("Invalid record");
+      AtomicOrdering SuccessOrdering = getDecodedOrdering(Record[OpNum + 1]);
+      if (SuccessOrdering == AtomicOrdering::NotAtomic ||
+          SuccessOrdering == AtomicOrdering::Unordered)
+        return error("Invalid record");
+      SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 2]);
+
+      if (Error Err = typeCheckLoadStoreInst(Cmp->getType(), Ptr->getType()))
+        return Err;
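+      // Older cmpxchg records do not carry a separate failure ordering;
+      // derive the strongest one compatible with the success ordering.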
+      AtomicOrdering FailureOrdering;
+      if (Record.size() < 7)
+        FailureOrdering =
+            AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrdering);
+      else
+        FailureOrdering = getDecodedOrdering(Record[OpNum + 3]);
+
+      I = new AtomicCmpXchgInst(Ptr, Cmp, New, SuccessOrdering, FailureOrdering,
+                                SynchScope);
+      cast<AtomicCmpXchgInst>(I)->setVolatile(Record[OpNum]);
+
+      if (Record.size() < 8) {
+        // Before weak cmpxchgs existed, the instruction simply returned the
+        // value loaded from memory, so bitcode files from that era will be
+        // expecting the first component of a modern cmpxchg.
+        CurBB->getInstList().push_back(I);
+        I = ExtractValueInst::Create(I, 0);
+      } else {
+        cast<AtomicCmpXchgInst>(I)->setWeak(Record[OpNum+4]);
+      }
+
+      InstructionList.push_back(I);
+      break;
+    }
+    case bitc::FUNC_CODE_INST_ATOMICRMW: {
+      // ATOMICRMW:[ptrty, ptr, val, op, vol, ordering, synchscope]
+      unsigned OpNum = 0;
+      Value *Ptr, *Val;
+      if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) ||
+          !isa<PointerType>(Ptr->getType()) ||
+          popValue(Record, OpNum, NextValueNo,
+                    cast<PointerType>(Ptr->getType())->getElementType(), Val) ||
+          OpNum+4 != Record.size())
+        return error("Invalid record");
+      AtomicRMWInst::BinOp Operation = getDecodedRMWOperation(Record[OpNum]);
+      if (Operation < AtomicRMWInst::FIRST_BINOP ||
+          Operation > AtomicRMWInst::LAST_BINOP)
+        return error("Invalid record");
+      AtomicOrdering Ordering = getDecodedOrdering(Record[OpNum + 2]);
+      if (Ordering == AtomicOrdering::NotAtomic ||
+          Ordering == AtomicOrdering::Unordered)
+        return error("Invalid record");
+      SynchronizationScope SynchScope = getDecodedSynchScope(Record[OpNum + 3]);
+      I = new AtomicRMWInst(Operation, Ptr, Val, Ordering, SynchScope);
+      cast<AtomicRMWInst>(I)->setVolatile(Record[OpNum+1]);
+      InstructionList.push_back(I);
+      break;
+    }
+    case bitc::FUNC_CODE_INST_FENCE: { // FENCE:[ordering, synchscope]
+      if (2 != Record.size())
+        return error("Invalid record");
+      AtomicOrdering Ordering = getDecodedOrdering(Record[0]);
+      if (Ordering == AtomicOrdering::NotAtomic ||
+          Ordering == AtomicOrdering::Unordered ||
+          Ordering == AtomicOrdering::Monotonic)
+        return error("Invalid record");
+      SynchronizationScope SynchScope = getDecodedSynchScope(Record[1]);
+      I = new FenceInst(Context, Ordering, SynchScope);
+      InstructionList.push_back(I);
+      break;
+    }
+    case bitc::FUNC_CODE_INST_CALL: {
+      // CALL: [paramattrs, cc, fmf, fnty, fnid, arg0, arg1...]
+      if (Record.size() < 3)
+        return error("Invalid record");
+
+      unsigned OpNum = 0;
+      AttributeSet PAL = getAttributes(Record[OpNum++]);
+      unsigned CCInfo = Record[OpNum++];
+
+      FastMathFlags FMF;
+      if ((CCInfo >> bitc::CALL_FMF) & 1) {
+        FMF = getDecodedFastMathFlags(Record[OpNum++]);
+        if (!FMF.any())
+          return error("Fast math flags indicator set for call with no FMF");
+      }
+
+      FunctionType *FTy = nullptr;
+      if (CCInfo >> bitc::CALL_EXPLICIT_TYPE & 1 &&
+          !(FTy = dyn_cast<FunctionType>(getTypeByID(Record[OpNum++]))))
+        return error("Explicit call type is not a function type");
+
+      Value *Callee;
+      if (getValueTypePair(Record, OpNum, NextValueNo, Callee))
+        return error("Invalid record");
+
+      PointerType *OpTy = dyn_cast<PointerType>(Callee->getType());
+      if (!OpTy)
+        return error("Callee is not a pointer type");
+      if (!FTy) {
+        FTy = dyn_cast<FunctionType>(OpTy->getElementType());
+        if (!FTy)
+          return error("Callee is not of pointer to function type");
+      } else if (OpTy->getElementType() != FTy)
+        return error("Explicit call type does not match pointee type of "
+                     "callee operand");
+      if (Record.size() < FTy->getNumParams() + OpNum)
+        return error("Insufficient operands to call");
+
+      SmallVector<Value*, 16> Args;
+      // Read the fixed params.
+      for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i, ++OpNum) {
+        if (FTy->getParamType(i)->isLabelTy())
+          Args.push_back(getBasicBlock(Record[OpNum]));
+        else
+          Args.push_back(getValue(Record, OpNum, NextValueNo,
+                                  FTy->getParamType(i)));
+        if (!Args.back())
+          return error("Invalid record");
+      }
+
+      // Read type/value pairs for varargs params.
+      if (!FTy->isVarArg()) {
+        if (OpNum != Record.size())
+          return error("Invalid record");
+      } else {
+        while (OpNum != Record.size()) {
+          Value *Op;
+          if (getValueTypePair(Record, OpNum, NextValueNo, Op))
+            return error("Invalid record");
+          Args.push_back(Op);
+        }
+      }
+
+      I = CallInst::Create(FTy, Callee, Args, OperandBundles);
+      OperandBundles.clear();
+      InstructionList.push_back(I);
+      cast<CallInst>(I)->setCallingConv(
+          static_cast<CallingConv::ID>((0x7ff & CCInfo) >> bitc::CALL_CCONV));
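+      // CCInfo also carries the tail-call kind as individual flag bits.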
+      CallInst::TailCallKind TCK = CallInst::TCK_None;
+      if (CCInfo & 1 << bitc::CALL_TAIL)
+        TCK = CallInst::TCK_Tail;
+      if (CCInfo & (1 << bitc::CALL_MUSTTAIL))
+        TCK = CallInst::TCK_MustTail;
+      if (CCInfo & (1 << bitc::CALL_NOTAIL))
+        TCK = CallInst::TCK_NoTail;
+      cast<CallInst>(I)->setTailCallKind(TCK);
+      cast<CallInst>(I)->setAttributes(PAL);
+      if (FMF.any()) {
+        if (!isa<FPMathOperator>(I))
+          return error("Fast-math-flags specified for call without "
+                       "floating-point scalar or vector return type");
+        I->setFastMathFlags(FMF);
+      }
+      break;
+    }
+    case bitc::FUNC_CODE_INST_VAARG: { // VAARG: [valistty, valist, instty]
+      if (Record.size() < 3)
+        return error("Invalid record");
+      Type *OpTy = getTypeByID(Record[0]);
+      Value *Op = getValue(Record, 1, NextValueNo, OpTy);
+      Type *ResTy = getTypeByID(Record[2]);
+      if (!OpTy || !Op || !ResTy)
+        return error("Invalid record");
+      I = new VAArgInst(Op, ResTy);
+      InstructionList.push_back(I);
+      break;
+    }
+
+    case bitc::FUNC_CODE_OPERAND_BUNDLE: {
+      // A call or an invoke can be optionally prefixed with some variable
+      // number of operand bundle blocks.  These blocks are read into
+      // OperandBundles and consumed at the next call or invoke instruction.
+
+      if (Record.size() < 1 || Record[0] >= BundleTags.size())
+        return error("Invalid record");
+
+      std::vector<Value *> Inputs;
+
+      unsigned OpNum = 1;
+      while (OpNum != Record.size()) {
+        Value *Op;
+        if (getValueTypePair(Record, OpNum, NextValueNo, Op))
+          return error("Invalid record");
+        Inputs.push_back(Op);
+      }
+
+      OperandBundles.emplace_back(BundleTags[Record[0]], std::move(Inputs));
+      continue;
+    }
+    }
+
+    // Add instruction to end of current BB.  If there is no current BB, reject
+    // this file.
+    if (!CurBB) {
+      delete I;
+      return error("Invalid instruction with no BB");
+    }
+    if (!OperandBundles.empty()) {
+      delete I;
+      return error("Operand bundles found with no consumer");
+    }
+    CurBB->getInstList().push_back(I);
+
+    // If this was a terminator instruction, move to the next block.
+    if (isa<TerminatorInst>(I)) {
+      ++CurBBNo;
+      CurBB = CurBBNo < FunctionBBs.size() ? FunctionBBs[CurBBNo] : nullptr;
+    }
+
+    // Non-void values get registered in the value table for future use.
+    if (I && !I->getType()->isVoidTy())
+      ValueList.assignValue(I, NextValueNo++);
+  }
+
+OutOfRecordLoop:
+
+  if (!OperandBundles.empty())
+    return error("Operand bundles found with no consumer");
+
+  // Check the function list for unresolved values.
+  if (Argument *A = dyn_cast<Argument>(ValueList.back())) {
+    if (!A->getParent()) {
+      // We found at least one unresolved value.  Nuke them all to avoid leaks.
+      for (unsigned i = ModuleValueListSize, e = ValueList.size(); i != e; ++i){
+        if ((A = dyn_cast_or_null<Argument>(ValueList[i])) && !A->getParent()) {
+          A->replaceAllUsesWith(UndefValue::get(A->getType()));
+          delete A;
+        }
+      }
+      return error("Never resolved value found in function");
+    }
+  }
+
+  // Unexpected unresolved metadata about to be dropped.
+  if (MDLoader->hasFwdRefs())
+    return error("Invalid function metadata: outgoing forward refs");
+
+  // Trim the value list down to the size it was before we parsed this function.
+  ValueList.shrinkTo(ModuleValueListSize);
+  MDLoader->shrinkTo(ModuleMDLoaderSize);
+  std::vector<BasicBlock*>().swap(FunctionBBs);
+  return Error::success();
+}
+
+/// Find the function body in the bitcode stream
+Error BitcodeReader::findFunctionInStream(
+    Function *F,
+    DenseMap<Function *, uint64_t>::iterator DeferredFunctionInfoIterator) {
+  while (DeferredFunctionInfoIterator->second == 0) {
+    // This is the fallback handling for the old format bitcode that
+    // didn't contain the function index in the VST, or when we have
+    // an anonymous function which would not have a VST entry.
+    // Assert that we have one of those two cases.
+    assert(VSTOffset == 0 || !F->hasName());
+    // Parse the next body in the stream and set its position in the
+    // DeferredFunctionInfo map.
+    if (Error Err = rememberAndSkipFunctionBodies())
+      return Err;
+  }
+  return Error::success();
+}
+
+//===----------------------------------------------------------------------===//
+// GVMaterializer implementation
+//===----------------------------------------------------------------------===//
+
+Error BitcodeReader::materialize(GlobalValue *GV) {
+  Function *F = dyn_cast<Function>(GV);
+  // If it's not a function or is already materialized, ignore the request.
+  if (!F || !F->isMaterializable())
+    return Error::success();
+
+  DenseMap<Function*, uint64_t>::iterator DFII = DeferredFunctionInfo.find(F);
+  assert(DFII != DeferredFunctionInfo.end() && "Deferred function not found!");
+  // If its position is recorded as 0, its body is somewhere in the stream
+  // but we haven't seen it yet.
+  if (DFII->second == 0)
+    if (Error Err = findFunctionInStream(F, DFII))
+      return Err;
+
+  // Materialize metadata before parsing any function bodies.
+  if (Error Err = materializeMetadata())
+    return Err;
+
+  // Move the bit stream to the saved position of the deferred function body.
+  Stream.JumpToBit(DFII->second);
+
+  if (Error Err = parseFunctionBody(F))
+    return Err;
+  F->setIsMaterializable(false);
+
+  if (StripDebugInfo)
+    stripDebugInfo(*F);
+
+  // Upgrade any old intrinsic calls in the function.
+  for (auto &I : UpgradedIntrinsics) {
+    for (auto UI = I.first->materialized_user_begin(), UE = I.first->user_end();
+         UI != UE;) {
+      User *U = *UI;
+      ++UI;
+      if (CallInst *CI = dyn_cast<CallInst>(U))
+        UpgradeIntrinsicCall(CI, I.second);
+    }
+  }
+
+  // Update calls to the remangled intrinsics
+  for (auto &I : RemangledIntrinsics)
+    for (auto UI = I.first->materialized_user_begin(), UE = I.first->user_end();
+         UI != UE;)
+      // Don't expect any users other than call sites
+      CallSite(*UI++).setCalledFunction(I.second);
+
+  // Finish fn->subprogram upgrade for materialized functions.
+  if (DISubprogram *SP = MDLoader->lookupSubprogramForFunction(F))
+    F->setSubprogram(SP);
+
+  // Check if the TBAA metadata is valid; otherwise we will need to strip it.
+  if (!MDLoader->isStrippingTBAA()) {
+    for (auto &I : instructions(F)) {
+      MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa);
+      if (!TBAA || TBAAVerifyHelper.visitTBAAMetadata(I, TBAA))
+        continue;
+      MDLoader->setStripTBAA(true);
+      stripTBAA(F->getParent());
+    }
+  }
+
+  // Bring in any functions that this function forward-referenced via
+  // blockaddresses.
+  return materializeForwardReferencedFunctions();
+}
+
+Error BitcodeReader::materializeModule() {
+  if (Error Err = materializeMetadata())
+    return Err;
+
+  // Promise to materialize all forward references.
+  WillMaterializeAllForwardRefs = true;
+
+  // Iterate over the module, deserializing any functions that are still on
+  // disk.
+  for (Function &F : *TheModule) {
+    if (Error Err = materialize(&F))
+      return Err;
+  }
+  // At this point, if there are any function bodies, parse the rest of
+  // the bits in the module past the last function block we have recorded
+  // through either lazy scanning or the VST.
+  if (LastFunctionBlockBit || NextUnreadBit)
+    if (Error Err = parseModule(LastFunctionBlockBit > NextUnreadBit
+                                    ? LastFunctionBlockBit
+                                    : NextUnreadBit))
+      return Err;
+
+  // Check that all block address forward references got resolved (as we
+  // promised above).
+  if (!BasicBlockFwdRefs.empty())
+    return error("Never resolved function from blockaddress");
+
+  // Upgrade any intrinsic calls that slipped through (should not happen!) and
+  // delete the old functions to clean up. We can't do this unless the entire
+  // module is materialized because there could always be another function body
+  // with calls to the old function.
+  for (auto &I : UpgradedIntrinsics) {
+    for (auto *U : I.first->users()) {
+      if (CallInst *CI = dyn_cast<CallInst>(U))
+        UpgradeIntrinsicCall(CI, I.second);
+    }
+    if (!I.first->use_empty())
+      I.first->replaceAllUsesWith(I.second);
+    I.first->eraseFromParent();
+  }
+  UpgradedIntrinsics.clear();
+  // Do the same for remangled intrinsics
+  for (auto &I : RemangledIntrinsics) {
+    I.first->replaceAllUsesWith(I.second);
+    I.first->eraseFromParent();
+  }
+  RemangledIntrinsics.clear();
+
+  UpgradeDebugInfo(*TheModule);
+
+  UpgradeModuleFlags(*TheModule);
+  return Error::success();
+}
+
+std::vector<StructType *> BitcodeReader::getIdentifiedStructTypes() const {
+  return IdentifiedStructTypes;
+}
+
+ModuleSummaryIndexBitcodeReader::ModuleSummaryIndexBitcodeReader(
+    BitstreamCursor Cursor, ModuleSummaryIndex &TheIndex)
+    : BitcodeReaderBase(std::move(Cursor)), TheIndex(TheIndex) {}
+
+std::pair<GlobalValue::GUID, GlobalValue::GUID>
+ModuleSummaryIndexBitcodeReader::getGUIDFromValueId(unsigned ValueId) {
+  auto VGI = ValueIdToCallGraphGUIDMap.find(ValueId);
+  assert(VGI != ValueIdToCallGraphGUIDMap.end());
+  return VGI->second;
+}
+
+// Specialized value symbol table parser used when reading module index
+// blocks where we don't actually create global values. The parsed information
+// is saved in the bitcode reader for use when later parsing summaries.
+Error ModuleSummaryIndexBitcodeReader::parseValueSymbolTable(
+    uint64_t Offset,
+    DenseMap<unsigned, GlobalValue::LinkageTypes> &ValueIdToLinkageMap) {
+  assert(Offset > 0 && "Expected non-zero VST offset");
+  uint64_t CurrentBit = jumpToValueSymbolTable(Offset, Stream);
+
+  if (Stream.EnterSubBlock(bitc::VALUE_SYMTAB_BLOCK_ID))
+    return error("Invalid record");
+
+  SmallVector<uint64_t, 64> Record;
+
+  // Read all the records for this value table.
+  SmallString<128> ValueName;
+
+  while (true) {
+    BitstreamEntry Entry = Stream.advanceSkippingSubblocks();
+
+    switch (Entry.Kind) {
+    case BitstreamEntry::SubBlock: // Handled for us already.
+    case BitstreamEntry::Error:
+      return error("Malformed block");
+    case BitstreamEntry::EndBlock:
+      // Done parsing VST, jump back to wherever we came from.
+      Stream.JumpToBit(CurrentBit);
+      return Error::success();
+    case BitstreamEntry::Record:
+      // The interesting case.
+      break;
+    }
+
+    // Read a record.
+    Record.clear();
+    switch (Stream.readRecord(Entry.ID, Record)) {
+    default: // Default behavior: ignore (e.g. VST_CODE_BBENTRY records).
+      break;
+    case bitc::VST_CODE_ENTRY: { // VST_CODE_ENTRY: [valueid, namechar x N]
+      if (convertToString(Record, 1, ValueName))
+        return error("Invalid record");
+      unsigned ValueID = Record[0];
+      assert(!SourceFileName.empty());
+      auto VLI = ValueIdToLinkageMap.find(ValueID);
+      assert(VLI != ValueIdToLinkageMap.end() &&
+             "No linkage found for VST entry?");
+      auto Linkage = VLI->second;
+      std::string GlobalId =
+          GlobalValue::getGlobalIdentifier(ValueName, Linkage, SourceFileName);
+      auto ValueGUID = GlobalValue::getGUID(GlobalId);
+      auto OriginalNameID = ValueGUID;
+      if (GlobalValue::isLocalLinkage(Linkage))
+        OriginalNameID = GlobalValue::getGUID(ValueName);
+      if (PrintSummaryGUIDs)
+        dbgs() << "GUID " << ValueGUID << "(" << OriginalNameID << ") is "
+               << ValueName << "\n";
+      ValueIdToCallGraphGUIDMap[ValueID] =
+          std::make_pair(ValueGUID, OriginalNameID);
+      ValueName.clear();
+      break;
+    }
+    case bitc::VST_CODE_FNENTRY: {
+      // VST_CODE_FNENTRY: [valueid, offset, namechar x N]
+      if (convertToString(Record, 2, ValueName))
+        return error("Invalid record");
+      unsigned ValueID = Record[0];
+      assert(!SourceFileName.empty());
+      auto VLI = ValueIdToLinkageMap.find(ValueID);
+      assert(VLI != ValueIdToLinkageMap.end() &&
+             "No linkage found for VST entry?");
+      auto Linkage = VLI->second;
+      std::string FunctionGlobalId = GlobalValue::getGlobalIdentifier(
+          ValueName, VLI->second, SourceFileName);
+      auto FunctionGUID = GlobalValue::getGUID(FunctionGlobalId);
+      auto OriginalNameID = FunctionGUID;
+      if (GlobalValue::isLocalLinkage(Linkage))
+        OriginalNameID = GlobalValue::getGUID(ValueName);
+      if (PrintSummaryGUIDs)
+        dbgs() << "GUID " << FunctionGUID << "(" << OriginalNameID << ") is "
+               << ValueName << "\n";
+      ValueIdToCallGraphGUIDMap[ValueID] =
+          std::make_pair(FunctionGUID, OriginalNameID);
+
+      ValueName.clear();
+      break;
+    }
+    case bitc::VST_CODE_COMBINED_ENTRY: {
+      // VST_CODE_COMBINED_ENTRY: [valueid, refguid]
+      unsigned ValueID = Record[0];
+      GlobalValue::GUID RefGUID = Record[1];
+      // The "original name", which is the second value of the pair will be
+      // overriden later by a FS_COMBINED_ORIGINAL_NAME in the combined index.
+      ValueIdToCallGraphGUIDMap[ValueID] = std::make_pair(RefGUID, RefGUID);
+      break;
+    }
+    }
+  }
+}
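+// Note on the GUID computation above: for a symbol with local linkage the
+// global identifier is formed by prefixing the value name with SourceFileName,
+// so identically named locals in different modules hash to distinct GUIDs. The
+// second member of each pair (the "original name" GUID) is the hash of the
+// bare name alone.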
+
+// Parse just the blocks needed for building the index out of the module.
+// At the end of this routine the module Index is populated with a map
+// from global value GUID to GlobalValueSummary objects.
+Error ModuleSummaryIndexBitcodeReader::parseModule(StringRef ModulePath) {
+  if (Stream.EnterSubBlock(bitc::MODULE_BLOCK_ID))
+    return error("Invalid record");
+
+  SmallVector<uint64_t, 64> Record;
+  DenseMap<unsigned, GlobalValue::LinkageTypes> ValueIdToLinkageMap;
+  unsigned ValueId = 0;
+
+  // Read the index for this module.
+  while (true) {
+    BitstreamEntry Entry = Stream.advance();
+
+    switch (Entry.Kind) {
+    case BitstreamEntry::Error:
+      return error("Malformed block");
+    case BitstreamEntry::EndBlock:
+      return Error::success();
+
+    case BitstreamEntry::SubBlock:
+      switch (Entry.ID) {
+      default: // Skip unknown content.
+        if (Stream.SkipBlock())
+          return error("Invalid record");
+        break;
+      case bitc::BLOCKINFO_BLOCK_ID:
+        // Need to parse these to get abbrev ids (e.g. for VST)
+        if (readBlockInfo())
+          return error("Malformed block");
+        break;
+      case bitc::VALUE_SYMTAB_BLOCK_ID:
+        // Should have been parsed earlier via VSTOffset, unless there
+        // is no summary section.
+        assert(((SeenValueSymbolTable && VSTOffset > 0) ||
+                !SeenGlobalValSummary) &&
+               "Expected early VST parse via VSTOffset record");
+        if (Stream.SkipBlock())
+          return error("Invalid record");
+        break;
+      case bitc::GLOBALVAL_SUMMARY_BLOCK_ID:
+        assert(!SeenValueSymbolTable &&
+               "Already read VST when parsing summary block?");
+        // We might not have a VST if there were no values in the
+        // summary. An empty summary block is generated when we are
+        // performing ThinLTO compiles so that we don't later invoke
+        // the regular LTO process on them.
+        if (VSTOffset > 0) {
+          if (Error Err = parseValueSymbolTable(VSTOffset, ValueIdToLinkageMap))
+            return Err;
+          SeenValueSymbolTable = true;
+        }
+        SeenGlobalValSummary = true;
+        if (Error Err = parseEntireSummary(ModulePath))
+          return Err;
+        break;
+      case bitc::MODULE_STRTAB_BLOCK_ID:
+        if (Error Err = parseModuleStringTable())
+          return Err;
+        break;
+      }
+      continue;
+
+    case BitstreamEntry::Record: {
+        Record.clear();
+        auto BitCode = Stream.readRecord(Entry.ID, Record);
+        switch (BitCode) {
+        default:
+          break; // Default behavior, ignore unknown content.
+        /// MODULE_CODE_SOURCE_FILENAME: [namechar x N]
+        case bitc::MODULE_CODE_SOURCE_FILENAME: {
+          SmallString<128> ValueName;
+          if (convertToString(Record, 0, ValueName))
+            return error("Invalid record");
+          SourceFileName = ValueName.c_str();
+          break;
+        }
+        /// MODULE_CODE_HASH: [5*i32]
+        case bitc::MODULE_CODE_HASH: {
+          if (Record.size() != 5)
+            return error("Invalid hash length " + Twine(Record.size()).str());
+          if (TheIndex.modulePaths().empty())
+            // We always seed the index with the module.
+            TheIndex.addModulePath(ModulePath, 0);
+          if (TheIndex.modulePaths().size() != 1)
+            return error("Don't expect multiple modules defined?");
+          auto &Hash = TheIndex.modulePaths().begin()->second.second;
+          int Pos = 0;
+          for (auto &Val : Record) {
+            assert(!(Val >> 32) && "Unexpected high bits set");
+            Hash[Pos++] = Val;
+          }
+          break;
+        }
+        /// MODULE_CODE_VSTOFFSET: [offset]
+        case bitc::MODULE_CODE_VSTOFFSET:
+          if (Record.size() < 1)
+            return error("Invalid record");
+          // Note that we subtract 1 here because the offset is relative to one
+          // word before the start of the identification or module block, which
+          // was historically always the start of the regular bitcode header.
+          VSTOffset = Record[0] - 1;
+          break;
+        // GLOBALVAR: [pointer type, isconst, initid,
+        //             linkage, alignment, section, visibility, threadlocal,
+        //             unnamed_addr, externally_initialized, dllstorageclass,
+        //             comdat]
+        case bitc::MODULE_CODE_GLOBALVAR: {
+          if (Record.size() < 6)
+            return error("Invalid record");
+          uint64_t RawLinkage = Record[3];
+          GlobalValue::LinkageTypes Linkage = getDecodedLinkage(RawLinkage);
+          ValueIdToLinkageMap[ValueId++] = Linkage;
+          break;
+        }
+        // FUNCTION:  [type, callingconv, isproto, linkage, paramattr,
+        //             alignment, section, visibility, gc, unnamed_addr,
+        //             prologuedata, dllstorageclass, comdat, prefixdata]
+        case bitc::MODULE_CODE_FUNCTION: {
+          if (Record.size() < 8)
+            return error("Invalid record");
+          uint64_t RawLinkage = Record[3];
+          GlobalValue::LinkageTypes Linkage = getDecodedLinkage(RawLinkage);
+          ValueIdToLinkageMap[ValueId++] = Linkage;
+          break;
+        }
+        // ALIAS: [alias type, addrspace, aliasee val#, linkage, visibility,
+        // dllstorageclass]
+        case bitc::MODULE_CODE_ALIAS: {
+          if (Record.size() < 6)
+            return error("Invalid record");
+          uint64_t RawLinkage = Record[3];
+          GlobalValue::LinkageTypes Linkage = getDecodedLinkage(RawLinkage);
+          ValueIdToLinkageMap[ValueId++] = Linkage;
+          break;
+        }
+        }
+      }
+      continue;
+    }
+  }
+}
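+// Note: ValueId above is assigned sequentially to each GLOBALVAR, FUNCTION,
+// and ALIAS record in the order the records appear. parseValueSymbolTable()
+// relies on this ordering matching the value ids carried in the VST records
+// (see the "No linkage found for VST entry?" assertion).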
+
+std::vector<ValueInfo>
+ModuleSummaryIndexBitcodeReader::makeRefList(ArrayRef<uint64_t> Record) {
+  std::vector<ValueInfo> Ret;
+  Ret.reserve(Record.size());
+  for (uint64_t RefValueId : Record)
+    Ret.push_back(getGUIDFromValueId(RefValueId).first);
+  return Ret;
+}
+
+std::vector<FunctionSummary::EdgeTy> ModuleSummaryIndexBitcodeReader::makeCallList(
+    ArrayRef<uint64_t> Record, bool IsOldProfileFormat, bool HasProfile) {
+  std::vector<FunctionSummary::EdgeTy> Ret;
+  Ret.reserve(Record.size());
+  for (unsigned I = 0, E = Record.size(); I != E; ++I) {
+    CalleeInfo::HotnessType Hotness = CalleeInfo::HotnessType::Unknown;
+    GlobalValue::GUID CalleeGUID = getGUIDFromValueId(Record[I]).first;
+    if (IsOldProfileFormat) {
+      I += 1; // Skip old callsitecount field
+      if (HasProfile)
+        I += 1; // Skip old profilecount field
+    } else if (HasProfile)
+      Hotness = static_cast<CalleeInfo::HotnessType>(Record[++I]);
+    Ret.push_back(FunctionSummary::EdgeTy{CalleeGUID, CalleeInfo{Hotness}});
+  }
+  return Ret;
+}
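+// Per-edge layout consumed by makeCallList(): in the old profile format each
+// edge is (valueid, callsitecount) or (valueid, callsitecount, profilecount)
+// and the trailing counts are skipped; in the current format an edge is plain
+// (valueid) or, when profile data is present, (valueid, hotness).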
+
+// Eagerly parse the entire summary block. This populates the GlobalValueSummary
+// objects in the index.
+Error ModuleSummaryIndexBitcodeReader::parseEntireSummary(
+    StringRef ModulePath) {
+  if (Stream.EnterSubBlock(bitc::GLOBALVAL_SUMMARY_BLOCK_ID))
+    return error("Invalid record");
+  SmallVector<uint64_t, 64> Record;
+
+  // Parse version
+  {
+    BitstreamEntry Entry = Stream.advanceSkippingSubblocks();
+    if (Entry.Kind != BitstreamEntry::Record)
+      return error("Invalid Summary Block: record for version expected");
+    if (Stream.readRecord(Entry.ID, Record) != bitc::FS_VERSION)
+      return error("Invalid Summary Block: version expected");
+  }
+  const uint64_t Version = Record[0];
+  const bool IsOldProfileFormat = Version == 1;
+  if (Version < 1 || Version > 3)
+    return error("Invalid summary version " + Twine(Version) +
+                 ", 1, 2 or 3 expected");
+  Record.clear();
+
+  // Keep around the last seen summary to be used when we see an optional
+  // "OriginalName" attachement.
+  GlobalValueSummary *LastSeenSummary = nullptr;
+  bool Combined = false;
+  std::vector<GlobalValue::GUID> PendingTypeTests;
+
+  while (true) {
+    BitstreamEntry Entry = Stream.advanceSkippingSubblocks();
+
+    switch (Entry.Kind) {
+    case BitstreamEntry::SubBlock: // Handled for us already.
+    case BitstreamEntry::Error:
+      return error("Malformed block");
+    case BitstreamEntry::EndBlock:
+      // For a per-module index, remove any entries that still have empty
+      // summaries. The VST parsing creates entries eagerly for all symbols,
+      // but not all have associated summaries (e.g. it doesn't know how to
+      // distinguish between VST_CODE_ENTRY for function declarations vs global
+      // variables with initializers that end up with a summary). Remove those
+      // entries now so that we don't need to rely on the combined index merger
+      // to clean them up (especially since that may not run for the first
+      // module's index if we merge into that).
+      if (!Combined)
+        TheIndex.removeEmptySummaryEntries();
+      return Error::success();
+    case BitstreamEntry::Record:
+      // The interesting case.
+      break;
+    }
+
+    // Read a record. The record format depends on whether this
+    // is a per-module index or a combined index file. In the per-module
+    // case the records contain the associated value's ID for correlation
+    // with VST entries. In the combined index the correlation is done
+    // via the bitcode offset of the summary records (which were saved
+    // in the combined index VST entries). The records also contain
+    // information used for ThinLTO renaming and importing.
+    Record.clear();
+    auto BitCode = Stream.readRecord(Entry.ID, Record);
+    switch (BitCode) {
+    default: // Default behavior: ignore.
+      break;
+    // FS_PERMODULE: [valueid, flags, instcount, numrefs, numrefs x valueid,
+    //                n x (valueid)]
+    // FS_PERMODULE_PROFILE: [valueid, flags, instcount, numrefs,
+    //                        numrefs x valueid,
+    //                        n x (valueid, hotness)]
+    case bitc::FS_PERMODULE:
+    case bitc::FS_PERMODULE_PROFILE: {
+      unsigned ValueID = Record[0];
+      uint64_t RawFlags = Record[1];
+      unsigned InstCount = Record[2];
+      unsigned NumRefs = Record[3];
+      auto Flags = getDecodedGVSummaryFlags(RawFlags, Version);
+      // The module path string ref set in the summary must be owned by the
+      // index's module string table. Since we don't have a module path
+      // string table section in the per-module index, we create a single
+      // module path string table entry with an empty (0) ID to take
+      // ownership.
+      static int RefListStartIndex = 4;
+      int CallGraphEdgeStartIndex = RefListStartIndex + NumRefs;
+      assert(Record.size() >= RefListStartIndex + NumRefs &&
+             "Record size inconsistent with number of references");
+      std::vector<ValueInfo> Refs = makeRefList(
+          ArrayRef<uint64_t>(Record).slice(RefListStartIndex, NumRefs));
+      bool HasProfile = (BitCode == bitc::FS_PERMODULE_PROFILE);
+      std::vector<FunctionSummary::EdgeTy> Calls = makeCallList(
+          ArrayRef<uint64_t>(Record).slice(CallGraphEdgeStartIndex),
+          IsOldProfileFormat, HasProfile);
+      auto FS = llvm::make_unique<FunctionSummary>(
+          Flags, InstCount, std::move(Refs), std::move(Calls),
+          std::move(PendingTypeTests));
+      PendingTypeTests.clear();
+      auto GUID = getGUIDFromValueId(ValueID);
+      FS->setModulePath(TheIndex.addModulePath(ModulePath, 0)->first());
+      FS->setOriginalName(GUID.second);
+      TheIndex.addGlobalValueSummary(GUID.first, std::move(FS));
+      break;
+    }
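+    // For illustration, a hypothetical FS_PERMODULE record
+    // [5, flags, 42, 2, 9, 10, 11, 12] describes value id 5 with 42
+    // instructions, references to value ids 9 and 10 (NumRefs = 2, starting at
+    // index 4), and call edges to value ids 11 and 12 starting at index 6.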
+    // FS_ALIAS: [valueid, flags, valueid]
+    // Aliases must be emitted (and parsed) after all FS_PERMODULE entries, as
+    // they expect all aliasee summaries to be available.
+    case bitc::FS_ALIAS: {
+      unsigned ValueID = Record[0];
+      uint64_t RawFlags = Record[1];
+      unsigned AliaseeID = Record[2];
+      auto Flags = getDecodedGVSummaryFlags(RawFlags, Version);
+      auto AS =
+          llvm::make_unique<AliasSummary>(Flags, std::vector<ValueInfo>{});
+      // The module path string ref set in the summary must be owned by the
+      // index's module string table. Since we don't have a module path
+      // string table section in the per-module index, we create a single
+      // module path string table entry with an empty (0) ID to take
+      // ownership.
+      AS->setModulePath(TheIndex.addModulePath(ModulePath, 0)->first());
+
+      GlobalValue::GUID AliaseeGUID = getGUIDFromValueId(AliaseeID).first;
+      auto *AliaseeSummary = TheIndex.getGlobalValueSummary(AliaseeGUID);
+      if (!AliaseeSummary)
+        return error("Alias expects aliasee summary to be parsed");
+      AS->setAliasee(AliaseeSummary);
+
+      auto GUID = getGUIDFromValueId(ValueID);
+      AS->setOriginalName(GUID.second);
+      TheIndex.addGlobalValueSummary(GUID.first, std::move(AS));
+      break;
+    }
+    // FS_PERMODULE_GLOBALVAR_INIT_REFS: [valueid, flags, n x valueid]
+    case bitc::FS_PERMODULE_GLOBALVAR_INIT_REFS: {
+      unsigned ValueID = Record[0];
+      uint64_t RawFlags = Record[1];
+      auto Flags = getDecodedGVSummaryFlags(RawFlags, Version);
+      std::vector<ValueInfo> Refs =
+          makeRefList(ArrayRef<uint64_t>(Record).slice(2));
+      auto FS = llvm::make_unique<GlobalVarSummary>(Flags, std::move(Refs));
+      FS->setModulePath(TheIndex.addModulePath(ModulePath, 0)->first());
+      auto GUID = getGUIDFromValueId(ValueID);
+      FS->setOriginalName(GUID.second);
+      TheIndex.addGlobalValueSummary(GUID.first, std::move(FS));
+      break;
+    }
+    // FS_COMBINED: [valueid, modid, flags, instcount, numrefs,
+    //               numrefs x valueid, n x (valueid)]
+    // FS_COMBINED_PROFILE: [valueid, modid, flags, instcount, numrefs,
+    //                       numrefs x valueid, n x (valueid, hotness)]
+    case bitc::FS_COMBINED:
+    case bitc::FS_COMBINED_PROFILE: {
+      unsigned ValueID = Record[0];
+      uint64_t ModuleId = Record[1];
+      uint64_t RawFlags = Record[2];
+      unsigned InstCount = Record[3];
+      unsigned NumRefs = Record[4];
+      auto Flags = getDecodedGVSummaryFlags(RawFlags, Version);
+      static int RefListStartIndex = 5;
+      int CallGraphEdgeStartIndex = RefListStartIndex + NumRefs;
+      assert(Record.size() >= RefListStartIndex + NumRefs &&
+             "Record size inconsistent with number of references");
+      std::vector<ValueInfo> Refs = makeRefList(
+          ArrayRef<uint64_t>(Record).slice(RefListStartIndex, NumRefs));
+      bool HasProfile = (BitCode == bitc::FS_COMBINED_PROFILE);
+      std::vector<FunctionSummary::EdgeTy> Edges = makeCallList(
+          ArrayRef<uint64_t>(Record).slice(CallGraphEdgeStartIndex),
+          IsOldProfileFormat, HasProfile);
+      GlobalValue::GUID GUID = getGUIDFromValueId(ValueID).first;
+      auto FS = llvm::make_unique<FunctionSummary>(
+          Flags, InstCount, std::move(Refs), std::move(Edges),
+          std::move(PendingTypeTests));
+      PendingTypeTests.clear();
+      LastSeenSummary = FS.get();
+      FS->setModulePath(ModuleIdMap[ModuleId]);
+      TheIndex.addGlobalValueSummary(GUID, std::move(FS));
+      Combined = true;
+      break;
+    }
+    // FS_COMBINED_ALIAS: [valueid, modid, flags, valueid]
+    // Aliases must be emitted (and parsed) after all FS_COMBINED entries, as
+    // they expect all aliasee summaries to be available.
+    case bitc::FS_COMBINED_ALIAS: {
+      unsigned ValueID = Record[0];
+      uint64_t ModuleId = Record[1];
+      uint64_t RawFlags = Record[2];
+      unsigned AliaseeValueId = Record[3];
+      auto Flags = getDecodedGVSummaryFlags(RawFlags, Version);
+      auto AS = llvm::make_unique<AliasSummary>(Flags, std::vector<ValueInfo>{});
+      LastSeenSummary = AS.get();
+      AS->setModulePath(ModuleIdMap[ModuleId]);
+
+      auto AliaseeGUID = getGUIDFromValueId(AliaseeValueId).first;
+      auto AliaseeInModule =
+          TheIndex.findSummaryInModule(AliaseeGUID, AS->modulePath());
+      if (!AliaseeInModule)
+        return error("Alias expects aliasee summary to be parsed");
+      AS->setAliasee(AliaseeInModule);
+
+      GlobalValue::GUID GUID = getGUIDFromValueId(ValueID).first;
+      TheIndex.addGlobalValueSummary(GUID, std::move(AS));
+      Combined = true;
+      break;
+    }
+    // FS_COMBINED_GLOBALVAR_INIT_REFS: [valueid, modid, flags, n x valueid]
+    case bitc::FS_COMBINED_GLOBALVAR_INIT_REFS: {
+      unsigned ValueID = Record[0];
+      uint64_t ModuleId = Record[1];
+      uint64_t RawFlags = Record[2];
+      auto Flags = getDecodedGVSummaryFlags(RawFlags, Version);
+      std::vector<ValueInfo> Refs =
+          makeRefList(ArrayRef<uint64_t>(Record).slice(3));
+      auto FS = llvm::make_unique<GlobalVarSummary>(Flags, std::move(Refs));
+      LastSeenSummary = FS.get();
+      FS->setModulePath(ModuleIdMap[ModuleId]);
+      GlobalValue::GUID GUID = getGUIDFromValueId(ValueID).first;
+      TheIndex.addGlobalValueSummary(GUID, std::move(FS));
+      Combined = true;
+      break;
+    }
+    // FS_COMBINED_ORIGINAL_NAME: [original_name]
+    case bitc::FS_COMBINED_ORIGINAL_NAME: {
+      uint64_t OriginalName = Record[0];
+      if (!LastSeenSummary)
+        return error("Name attachment that does not follow a combined record");
+      LastSeenSummary->setOriginalName(OriginalName);
+      // Reset the LastSeenSummary
+      LastSeenSummary = nullptr;
+      break;
+    }
+    case bitc::FS_TYPE_TESTS: {
+      assert(PendingTypeTests.empty());
+      PendingTypeTests.insert(PendingTypeTests.end(), Record.begin(),
+                              Record.end());
+      break;
+    }
+    }
+  }
+  llvm_unreachable("Exit infinite loop");
+}
+
+// Parse the module string table block into the Index.
+// This populates the ModulePathStringTable map in the index.
+Error ModuleSummaryIndexBitcodeReader::parseModuleStringTable() {
+  if (Stream.EnterSubBlock(bitc::MODULE_STRTAB_BLOCK_ID))
+    return error("Invalid record");
+
+  SmallVector<uint64_t, 64> Record;
+
+  SmallString<128> ModulePath;
+  ModulePathStringTableTy::iterator LastSeenModulePath;
+
+  while (true) {
+    BitstreamEntry Entry = Stream.advanceSkippingSubblocks();
+
+    switch (Entry.Kind) {
+    case BitstreamEntry::SubBlock: // Handled for us already.
+    case BitstreamEntry::Error:
+      return error("Malformed block");
+    case BitstreamEntry::EndBlock:
+      return Error::success();
+    case BitstreamEntry::Record:
+      // The interesting case.
+      break;
+    }
+
+    Record.clear();
+    switch (Stream.readRecord(Entry.ID, Record)) {
+    default: // Default behavior: ignore.
+      break;
+    case bitc::MST_CODE_ENTRY: {
+      // MST_ENTRY: [modid, namechar x N]
+      uint64_t ModuleId = Record[0];
+
+      if (convertToString(Record, 1, ModulePath))
+        return error("Invalid record");
+
+      LastSeenModulePath = TheIndex.addModulePath(ModulePath, ModuleId);
+      ModuleIdMap[ModuleId] = LastSeenModulePath->first();
+
+      ModulePath.clear();
+      break;
+    }
+    /// MST_CODE_HASH: [5*i32]
+    case bitc::MST_CODE_HASH: {
+      if (Record.size() != 5)
+        return error("Invalid hash length " + Twine(Record.size()).str());
+      if (LastSeenModulePath == TheIndex.modulePaths().end())
+        return error("Invalid hash that does not follow a module path");
+      int Pos = 0;
+      for (auto &Val : Record) {
+        assert(!(Val >> 32) && "Unexpected high bits set");
+        LastSeenModulePath->second.second[Pos++] = Val;
+      }
+      // Reset LastSeenModulePath to avoid overriding the hash unexpectedly.
+      LastSeenModulePath = TheIndex.modulePaths().end();
+      break;
+    }
+    }
+  }
+  llvm_unreachable("Exit infinite loop");
+}
+
+namespace {
+
+// FIXME: This class is only here to support the transition to llvm::Error. It
+// will be removed once this transition is complete. Clients should prefer to
+// deal with the Error value directly, rather than converting to error_code.
+class BitcodeErrorCategoryType : public std::error_category {
+  const char *name() const noexcept override {
+    return "llvm.bitcode";
+  }
+  std::string message(int IE) const override {
+    BitcodeError E = static_cast<BitcodeError>(IE);
+    switch (E) {
+    case BitcodeError::CorruptedBitcode:
+      return "Corrupted bitcode";
+    }
+    llvm_unreachable("Unknown error type!");
+  }
+};
+
+} // end anonymous namespace
+
+static ManagedStatic<BitcodeErrorCategoryType> ErrorCategory;
+
+const std::error_category &llvm::BitcodeErrorCategory() {
+  return *ErrorCategory;
+}
+
+//===----------------------------------------------------------------------===//
+// External interface
+//===----------------------------------------------------------------------===//
+
+Expected<std::vector<BitcodeModule>>
+llvm::getBitcodeModuleList(MemoryBufferRef Buffer) {
+  Expected<BitstreamCursor> StreamOrErr = initStream(Buffer);
+  if (!StreamOrErr)
+    return StreamOrErr.takeError();
+  BitstreamCursor &Stream = *StreamOrErr;
+
+  std::vector<BitcodeModule> Modules;
+  while (true) {
+    uint64_t BCBegin = Stream.getCurrentByteNo();
+
+    // We may be consuming bitcode from a client that leaves garbage at the end
+    // of the bitcode stream (e.g. Apple's ar tool). If we are close enough to
+    // the end that there cannot possibly be another module, stop looking.
+    if (BCBegin + 8 >= Stream.getBitcodeBytes().size())
+      return Modules;
+
+    BitstreamEntry Entry = Stream.advance();
+    switch (Entry.Kind) {
+    case BitstreamEntry::EndBlock:
+    case BitstreamEntry::Error:
+      return error("Malformed block");
+
+    case BitstreamEntry::SubBlock: {
+      uint64_t IdentificationBit = -1ull;
+      if (Entry.ID == bitc::IDENTIFICATION_BLOCK_ID) {
+        IdentificationBit = Stream.GetCurrentBitNo() - BCBegin * 8;
+        if (Stream.SkipBlock())
+          return error("Malformed block");
+
+        Entry = Stream.advance();
+        if (Entry.Kind != BitstreamEntry::SubBlock ||
+            Entry.ID != bitc::MODULE_BLOCK_ID)
+          return error("Malformed block");
+      }
+
+      if (Entry.ID == bitc::MODULE_BLOCK_ID) {
+        uint64_t ModuleBit = Stream.GetCurrentBitNo() - BCBegin * 8;
+        if (Stream.SkipBlock())
+          return error("Malformed block");
+
+        Modules.push_back({Stream.getBitcodeBytes().slice(
+                               BCBegin, Stream.getCurrentByteNo() - BCBegin),
+                           Buffer.getBufferIdentifier(), IdentificationBit,
+                           ModuleBit});
+        continue;
+      }
+
+      if (Stream.SkipBlock())
+        return error("Malformed block");
+      continue;
+    }
+    case BitstreamEntry::Record:
+      Stream.skipRecord(Entry.ID);
+      continue;
+    }
+  }
+}
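+// An IdentificationBit of -1ull is used as a sentinel meaning that no
+// IDENTIFICATION_BLOCK preceded this module; getModuleImpl() checks for it
+// before attempting to read the producer string.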
+
+/// \brief Get a lazy one-at-a-time loading module from bitcode.
+///
+/// This isn't always used in a lazy context.  In particular, it's also used by
+/// \a parseModule().  If this is truly lazy, then we need to eagerly pull
+/// in forward-referenced functions from block address references.
+///
+/// \param[in] MaterializeAll Set to \c true if we should materialize
+/// everything.
+Expected<std::unique_ptr<Module>>
+BitcodeModule::getModuleImpl(LLVMContext &Context, bool MaterializeAll,
+                             bool ShouldLazyLoadMetadata, bool IsImporting) {
+  BitstreamCursor Stream(Buffer);
+
+  std::string ProducerIdentification;
+  if (IdentificationBit != -1ull) {
+    Stream.JumpToBit(IdentificationBit);
+    Expected<std::string> ProducerIdentificationOrErr =
+        readIdentificationBlock(Stream);
+    if (!ProducerIdentificationOrErr)
+      return ProducerIdentificationOrErr.takeError();
+
+    ProducerIdentification = *ProducerIdentificationOrErr;
+  }
+
+  Stream.JumpToBit(ModuleBit);
+  auto *R =
+      new BitcodeReader(std::move(Stream), ProducerIdentification, Context);
+
+  std::unique_ptr<Module> M =
+      llvm::make_unique<Module>(ModuleIdentifier, Context);
+  M->setMaterializer(R);
+
+  // Delay parsing Metadata if ShouldLazyLoadMetadata is true.
+  if (Error Err =
+          R->parseBitcodeInto(M.get(), ShouldLazyLoadMetadata, IsImporting))
+    return std::move(Err);
+
+  if (MaterializeAll) {
+    // Read in the entire module, and destroy the BitcodeReader.
+    if (Error Err = M->materializeAll())
+      return std::move(Err);
+  } else {
+    // Resolve forward references from blockaddresses.
+    if (Error Err = R->materializeForwardReferencedFunctions())
+      return std::move(Err);
+  }
+  return std::move(M);
+}
+
+Expected<std::unique_ptr<Module>>
+BitcodeModule::getLazyModule(LLVMContext &Context, bool ShouldLazyLoadMetadata,
+                             bool IsImporting) {
+  return getModuleImpl(Context, false, ShouldLazyLoadMetadata, IsImporting);
+}
+
+// Parse the specified bitcode buffer, returning the function info index.
+Expected<std::unique_ptr<ModuleSummaryIndex>> BitcodeModule::getSummary() {
+  BitstreamCursor Stream(Buffer);
+  Stream.JumpToBit(ModuleBit);
+
+  auto Index = llvm::make_unique<ModuleSummaryIndex>();
+  ModuleSummaryIndexBitcodeReader R(std::move(Stream), *Index);
+
+  if (Error Err = R.parseModule(ModuleIdentifier))
+    return std::move(Err);
+
+  return std::move(Index);
+}
+
+// Check if the given bitcode buffer contains a global value summary block.
+Expected<bool> BitcodeModule::hasSummary() {
+  BitstreamCursor Stream(Buffer);
+  Stream.JumpToBit(ModuleBit);
+
+  if (Stream.EnterSubBlock(bitc::MODULE_BLOCK_ID))
+    return error("Invalid record");
+
+  while (true) {
+    BitstreamEntry Entry = Stream.advance();
+
+    switch (Entry.Kind) {
+    case BitstreamEntry::Error:
+      return error("Malformed block");
+    case BitstreamEntry::EndBlock:
+      return false;
+
+    case BitstreamEntry::SubBlock:
+      if (Entry.ID == bitc::GLOBALVAL_SUMMARY_BLOCK_ID)
+        return true;
+
+      // Ignore other sub-blocks.
+      if (Stream.SkipBlock())
+        return error("Malformed block");
+      continue;
+
+    case BitstreamEntry::Record:
+      Stream.skipRecord(Entry.ID);
+      continue;
+    }
+  }
+}
+
+static Expected<BitcodeModule> getSingleModule(MemoryBufferRef Buffer) {
+  Expected<std::vector<BitcodeModule>> MsOrErr = getBitcodeModuleList(Buffer);
+  if (!MsOrErr)
+    return MsOrErr.takeError();
+
+  if (MsOrErr->size() != 1)
+    return error("Expected a single module");
+
+  return (*MsOrErr)[0];
+}
+
+Expected<std::unique_ptr<Module>>
+llvm::getLazyBitcodeModule(MemoryBufferRef Buffer, LLVMContext &Context,
+                           bool ShouldLazyLoadMetadata, bool IsImporting) {
+  Expected<BitcodeModule> BM = getSingleModule(Buffer);
+  if (!BM)
+    return BM.takeError();
+
+  return BM->getLazyModule(Context, ShouldLazyLoadMetadata, IsImporting);
+}
+
+Expected<std::unique_ptr<Module>> llvm::getOwningLazyBitcodeModule(
+    std::unique_ptr<MemoryBuffer> &&Buffer, LLVMContext &Context,
+    bool ShouldLazyLoadMetadata, bool IsImporting) {
+  auto MOrErr = getLazyBitcodeModule(*Buffer, Context, ShouldLazyLoadMetadata,
+                                     IsImporting);
+  if (MOrErr)
+    (*MOrErr)->setOwnedMemoryBuffer(std::move(Buffer));
+  return MOrErr;
+}
+
+Expected<std::unique_ptr<Module>>
+BitcodeModule::parseModule(LLVMContext &Context) {
+  return getModuleImpl(Context, true, false, false);
+  // TODO: Restore the use-lists to the in-memory state when the bitcode was
+  // written.  We must defer until the Module has been fully materialized.
+}
+
+Expected<std::unique_ptr<Module>> llvm::parseBitcodeFile(MemoryBufferRef Buffer,
+                                                         LLVMContext &Context) {
+  Expected<BitcodeModule> BM = getSingleModule(Buffer);
+  if (!BM)
+    return BM.takeError();
+
+  return BM->parseModule(Context);
+}
+
+Expected<std::string> llvm::getBitcodeTargetTriple(MemoryBufferRef Buffer) {
+  Expected<BitstreamCursor> StreamOrErr = initStream(Buffer);
+  if (!StreamOrErr)
+    return StreamOrErr.takeError();
+
+  return readTriple(*StreamOrErr);
+}
+
+Expected<bool> llvm::isBitcodeContainingObjCCategory(MemoryBufferRef Buffer) {
+  Expected<BitstreamCursor> StreamOrErr = initStream(Buffer);
+  if (!StreamOrErr)
+    return StreamOrErr.takeError();
+
+  return hasObjCCategory(*StreamOrErr);
+}
+
+Expected<std::string> llvm::getBitcodeProducerString(MemoryBufferRef Buffer) {
+  Expected<BitstreamCursor> StreamOrErr = initStream(Buffer);
+  if (!StreamOrErr)
+    return StreamOrErr.takeError();
+
+  return readIdentificationCode(*StreamOrErr);
+}
+
+Expected<std::unique_ptr<ModuleSummaryIndex>>
+llvm::getModuleSummaryIndex(MemoryBufferRef Buffer) {
+  Expected<BitcodeModule> BM = getSingleModule(Buffer);
+  if (!BM)
+    return BM.takeError();
+
+  return BM->getSummary();
+}
+
+Expected<bool> llvm::hasGlobalValueSummary(MemoryBufferRef Buffer) {
+  Expected<BitcodeModule> BM = getSingleModule(Buffer);
+  if (!BM)
+    return BM.takeError();
+
+  return BM->hasSummary();
+}
diff --git a/llvm/tools/hpvm/llvm_patches/lib/Bitcode/Reader/BitcodeReader.cpp.patch b/llvm/tools/hpvm/llvm_patches/lib/Bitcode/Reader/BitcodeReader.cpp.patch
new file mode 100644
index 0000000000..638614a7f9
--- /dev/null
+++ b/llvm/tools/hpvm/llvm_patches/lib/Bitcode/Reader/BitcodeReader.cpp.patch
@@ -0,0 +1,15 @@
+--- ../../../lib/Bitcode/Reader/BitcodeReader.cpp	2019-12-29 18:23:35.483920360 -0600
++++ lib/Bitcode/Reader/BitcodeReader.cpp	2019-12-29 18:46:57.005656131 -0600
+@@ -1066,6 +1066,12 @@
+   case Attribute::SwiftSelf:       return 1ULL << 51;
+   case Attribute::SwiftError:      return 1ULL << 52;
+   case Attribute::WriteOnly:       return 1ULL << 53;
++
++  // VISC Attributes
++  case Attribute::In:                return 1ULL << 54;
++  case Attribute::Out:               return 1ULL << 55;
++  case Attribute::InOut:             return 1ULL << 56;
++
+   case Attribute::Dereferenceable:
+     llvm_unreachable("dereferenceable attribute not supported in raw format");
+     break;
diff --git a/llvm/tools/hpvm/llvm_patches/lib/Bitcode/Writer/BitcodeWriter.cpp b/llvm/tools/hpvm/llvm_patches/lib/Bitcode/Writer/BitcodeWriter.cpp
new file mode 100644
index 0000000000..cf625f43b4
--- /dev/null
+++ b/llvm/tools/hpvm/llvm_patches/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -0,0 +1,3970 @@
+//===--- Bitcode/Writer/BitcodeWriter.cpp - Bitcode Writer ----------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Bitcode writer implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Bitcode/BitcodeWriter.h"
+#include "ValueEnumerator.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Bitcode/BitstreamWriter.h"
+#include "llvm/Bitcode/LLVMBitCodes.h"
+#include "llvm/IR/CallSite.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/UseListOrder.h"
+#include "llvm/IR/ValueSymbolTable.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/Program.h"
+#include "llvm/Support/SHA1.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cctype>
+#include <map>
+using namespace llvm;
+
+namespace {
+
+cl::opt<unsigned>
+    IndexThreshold("bitcode-mdindex-threshold", cl::Hidden, cl::init(25),
+                   cl::desc("Number of metadatas above which we emit an index "
+                            "to enable lazy-loading"));
+/// These are manifest constants used by the bitcode writer. They do not need to
+/// be kept in sync with the reader, but need to be consistent within this file.
+enum {
+  // VALUE_SYMTAB_BLOCK abbrev id's.
+  VST_ENTRY_8_ABBREV = bitc::FIRST_APPLICATION_ABBREV,
+  VST_ENTRY_7_ABBREV,
+  VST_ENTRY_6_ABBREV,
+  VST_BBENTRY_6_ABBREV,
+
+  // CONSTANTS_BLOCK abbrev id's.
+  CONSTANTS_SETTYPE_ABBREV = bitc::FIRST_APPLICATION_ABBREV,
+  CONSTANTS_INTEGER_ABBREV,
+  CONSTANTS_CE_CAST_Abbrev,
+  CONSTANTS_NULL_Abbrev,
+
+  // FUNCTION_BLOCK abbrev id's.
+  FUNCTION_INST_LOAD_ABBREV = bitc::FIRST_APPLICATION_ABBREV,
+  FUNCTION_INST_BINOP_ABBREV,
+  FUNCTION_INST_BINOP_FLAGS_ABBREV,
+  FUNCTION_INST_CAST_ABBREV,
+  FUNCTION_INST_RET_VOID_ABBREV,
+  FUNCTION_INST_RET_VAL_ABBREV,
+  FUNCTION_INST_UNREACHABLE_ABBREV,
+  FUNCTION_INST_GEP_ABBREV,
+};
+
+/// Abstract class to manage the bitcode writing, subclassed for each bitcode
+/// file type.
+class BitcodeWriterBase {
+protected:
+  /// The stream created and owned by the client.
+  BitstreamWriter &Stream;
+
+  /// Saves the offset of the VSTOffset record that must eventually be
+  /// backpatched with the offset of the actual VST.
+  uint64_t VSTOffsetPlaceholder = 0;
+
+public:
+  /// Constructs a BitcodeWriterBase object that writes to the provided
+  /// \p Stream.
+  BitcodeWriterBase(BitstreamWriter &Stream) : Stream(Stream) {}
+
+protected:
+  bool hasVSTOffsetPlaceholder() { return VSTOffsetPlaceholder != 0; }
+  void writeValueSymbolTableForwardDecl();
+  void writeBitcodeHeader();
+};
+
+/// Class to manage the bitcode writing for a module.
+class ModuleBitcodeWriter : public BitcodeWriterBase {
+  /// Pointer to the buffer allocated by caller for bitcode writing.
+  const SmallVectorImpl<char> &Buffer;
+
+  /// The Module to write to bitcode.
+  const Module &M;
+
+  /// Enumerates ids for all values in the module.
+  ValueEnumerator VE;
+
+  /// Optional per-module index to write for ThinLTO.
+  const ModuleSummaryIndex *Index;
+
+  /// True if a module hash record should be written.
+  bool GenerateHash;
+
+  /// The start bit of the identification block.
+  uint64_t BitcodeStartBit;
+
+  /// Map that holds the correspondence between GUIDs in the summary index
+  /// (those that came from indirect call profiles) and a value id generated by
+  /// this class to use in the VST and summary block records.
+  std::map<GlobalValue::GUID, unsigned> GUIDToValueIdMap;
+
+  /// Tracks the last value id recorded in the GUIDToValueIdMap.
+  unsigned GlobalValueId;
+
+public:
+  /// Constructs a ModuleBitcodeWriter object for the given Module,
+  /// writing to the provided \p Buffer.
+  ModuleBitcodeWriter(const Module *M, SmallVectorImpl<char> &Buffer,
+                      BitstreamWriter &Stream, bool ShouldPreserveUseListOrder,
+                      const ModuleSummaryIndex *Index, bool GenerateHash)
+      : BitcodeWriterBase(Stream), Buffer(Buffer), M(*M),
+        VE(*M, ShouldPreserveUseListOrder), Index(Index),
+        GenerateHash(GenerateHash), BitcodeStartBit(Stream.GetCurrentBitNo()) {
+    // Assign ValueIds to any callee values in the index that came from
+    // indirect call profiles and were recorded as a GUID not a Value*
+    // (which would have been assigned an ID by the ValueEnumerator).
+    // The starting ValueId is just after the number of values in the
+    // ValueEnumerator, so that they can be emitted in the VST.
+    GlobalValueId = VE.getValues().size();
+    if (!Index)
+      return;
+    for (const auto &GUIDSummaryLists : *Index)
+      // Examine all summaries for this GUID.
+      for (auto &Summary : GUIDSummaryLists.second)
+        if (auto FS = dyn_cast<FunctionSummary>(Summary.get()))
+          // For each call in the function summary, see if the call
+          // is to a GUID (which means it is for an indirect call,
+          // otherwise we would have a Value for it). If so, synthesize
+          // a value id.
+          for (auto &CallEdge : FS->calls())
+            if (CallEdge.first.isGUID())
+              assignValueId(CallEdge.first.getGUID());
+  }
+
+  /// Emit the current module to the bitstream.
+  void write();
+
+private:
+  uint64_t bitcodeStartBit() { return BitcodeStartBit; }
+
+  void writeAttributeGroupTable();
+  void writeAttributeTable();
+  void writeTypeTable();
+  void writeComdats();
+  void writeModuleInfo();
+  void writeValueAsMetadata(const ValueAsMetadata *MD,
+                            SmallVectorImpl<uint64_t> &Record);
+  void writeMDTuple(const MDTuple *N, SmallVectorImpl<uint64_t> &Record,
+                    unsigned Abbrev);
+  unsigned createDILocationAbbrev();
+  void writeDILocation(const DILocation *N, SmallVectorImpl<uint64_t> &Record,
+                       unsigned &Abbrev);
+  unsigned createGenericDINodeAbbrev();
+  void writeGenericDINode(const GenericDINode *N,
+                          SmallVectorImpl<uint64_t> &Record, unsigned &Abbrev);
+  void writeDISubrange(const DISubrange *N, SmallVectorImpl<uint64_t> &Record,
+                       unsigned Abbrev);
+  void writeDIEnumerator(const DIEnumerator *N,
+                         SmallVectorImpl<uint64_t> &Record, unsigned Abbrev);
+  void writeDIBasicType(const DIBasicType *N, SmallVectorImpl<uint64_t> &Record,
+                        unsigned Abbrev);
+  void writeDIDerivedType(const DIDerivedType *N,
+                          SmallVectorImpl<uint64_t> &Record, unsigned Abbrev);
+  void writeDICompositeType(const DICompositeType *N,
+                            SmallVectorImpl<uint64_t> &Record, unsigned Abbrev);
+  void writeDISubroutineType(const DISubroutineType *N,
+                             SmallVectorImpl<uint64_t> &Record,
+                             unsigned Abbrev);
+  void writeDIFile(const DIFile *N, SmallVectorImpl<uint64_t> &Record,
+                   unsigned Abbrev);
+  void writeDICompileUnit(const DICompileUnit *N,
+                          SmallVectorImpl<uint64_t> &Record, unsigned Abbrev);
+  void writeDISubprogram(const DISubprogram *N,
+                         SmallVectorImpl<uint64_t> &Record, unsigned Abbrev);
+  void writeDILexicalBlock(const DILexicalBlock *N,
+                           SmallVectorImpl<uint64_t> &Record, unsigned Abbrev);
+  void writeDILexicalBlockFile(const DILexicalBlockFile *N,
+                               SmallVectorImpl<uint64_t> &Record,
+                               unsigned Abbrev);
+  void writeDINamespace(const DINamespace *N, SmallVectorImpl<uint64_t> &Record,
+                        unsigned Abbrev);
+  void writeDIMacro(const DIMacro *N, SmallVectorImpl<uint64_t> &Record,
+                    unsigned Abbrev);
+  void writeDIMacroFile(const DIMacroFile *N, SmallVectorImpl<uint64_t> &Record,
+                        unsigned Abbrev);
+  void writeDIModule(const DIModule *N, SmallVectorImpl<uint64_t> &Record,
+                     unsigned Abbrev);
+  void writeDITemplateTypeParameter(const DITemplateTypeParameter *N,
+                                    SmallVectorImpl<uint64_t> &Record,
+                                    unsigned Abbrev);
+  void writeDITemplateValueParameter(const DITemplateValueParameter *N,
+                                     SmallVectorImpl<uint64_t> &Record,
+                                     unsigned Abbrev);
+  void writeDIGlobalVariable(const DIGlobalVariable *N,
+                             SmallVectorImpl<uint64_t> &Record,
+                             unsigned Abbrev);
+  void writeDILocalVariable(const DILocalVariable *N,
+                            SmallVectorImpl<uint64_t> &Record, unsigned Abbrev);
+  void writeDIExpression(const DIExpression *N,
+                         SmallVectorImpl<uint64_t> &Record, unsigned Abbrev);
+  void writeDIGlobalVariableExpression(const DIGlobalVariableExpression *N,
+                                       SmallVectorImpl<uint64_t> &Record,
+                                       unsigned Abbrev);
+  void writeDIObjCProperty(const DIObjCProperty *N,
+                           SmallVectorImpl<uint64_t> &Record, unsigned Abbrev);
+  void writeDIImportedEntity(const DIImportedEntity *N,
+                             SmallVectorImpl<uint64_t> &Record,
+                             unsigned Abbrev);
+  unsigned createNamedMetadataAbbrev();
+  void writeNamedMetadata(SmallVectorImpl<uint64_t> &Record);
+  unsigned createMetadataStringsAbbrev();
+  void writeMetadataStrings(ArrayRef<const Metadata *> Strings,
+                            SmallVectorImpl<uint64_t> &Record);
+  void writeMetadataRecords(ArrayRef<const Metadata *> MDs,
+                            SmallVectorImpl<uint64_t> &Record,
+                            std::vector<unsigned> *MDAbbrevs = nullptr,
+                            std::vector<uint64_t> *IndexPos = nullptr);
+  void writeModuleMetadata();
+  void writeFunctionMetadata(const Function &F);
+  void writeFunctionMetadataAttachment(const Function &F);
+  void writeGlobalVariableMetadataAttachment(const GlobalVariable &GV);
+  void pushGlobalMetadataAttachment(SmallVectorImpl<uint64_t> &Record,
+                                    const GlobalObject &GO);
+  void writeModuleMetadataKinds();
+  void writeOperandBundleTags();
+  void writeConstants(unsigned FirstVal, unsigned LastVal, bool isGlobal);
+  void writeModuleConstants();
+  bool pushValueAndType(const Value *V, unsigned InstID,
+                        SmallVectorImpl<unsigned> &Vals);
+  void writeOperandBundles(ImmutableCallSite CS, unsigned InstID);
+  void pushValue(const Value *V, unsigned InstID,
+                 SmallVectorImpl<unsigned> &Vals);
+  void pushValueSigned(const Value *V, unsigned InstID,
+                       SmallVectorImpl<uint64_t> &Vals);
+  void writeInstruction(const Instruction &I, unsigned InstID,
+                        SmallVectorImpl<unsigned> &Vals);
+  void writeValueSymbolTable(
+      const ValueSymbolTable &VST, bool IsModuleLevel = false,
+      DenseMap<const Function *, uint64_t> *FunctionToBitcodeIndex = nullptr);
+  void writeUseList(UseListOrder &&Order);
+  void writeUseListBlock(const Function *F);
+  void
+  writeFunction(const Function &F,
+                DenseMap<const Function *, uint64_t> &FunctionToBitcodeIndex);
+  void writeBlockInfo();
+  void writePerModuleFunctionSummaryRecord(SmallVector<uint64_t, 64> &NameVals,
+                                           GlobalValueSummary *Summary,
+                                           unsigned ValueID,
+                                           unsigned FSCallsAbbrev,
+                                           unsigned FSCallsProfileAbbrev,
+                                           const Function &F);
+  void writeModuleLevelReferences(const GlobalVariable &V,
+                                  SmallVector<uint64_t, 64> &NameVals,
+                                  unsigned FSModRefsAbbrev);
+  void writePerModuleGlobalValueSummary();
+  void writeModuleHash(size_t BlockStartPos);
+
+  void assignValueId(GlobalValue::GUID ValGUID) {
+    GUIDToValueIdMap[ValGUID] = ++GlobalValueId;
+  }
+  unsigned getValueId(GlobalValue::GUID ValGUID) {
+    const auto &VMI = GUIDToValueIdMap.find(ValGUID);
+    // Expect that any GUID value had a value Id assigned by an
+    // earlier call to assignValueId.
+    assert(VMI != GUIDToValueIdMap.end() &&
+           "GUID does not have assigned value Id");
+    return VMI->second;
+  }
+  // Helper to get the valueId for the type of value recorded in VI.
+  unsigned getValueId(ValueInfo VI) {
+    if (VI.isGUID())
+      return getValueId(VI.getGUID());
+    return VE.getValueID(VI.getValue());
+  }
+  std::map<GlobalValue::GUID, unsigned> &valueIds() { return GUIDToValueIdMap; }
+};
+
+/// Class to manage the bitcode writing for a combined index.
+class IndexBitcodeWriter : public BitcodeWriterBase {
+  /// The combined index to write to bitcode.
+  const ModuleSummaryIndex &Index;
+
+  /// When writing a subset of the index for distributed backends, the client
+  /// provides a map of modules to the corresponding GUIDs/summaries to write.
+  const std::map<std::string, GVSummaryMapTy> *ModuleToSummariesForIndex;
+
+  /// Map that holds the correspondence between the GUID used in the combined
+  /// index and a value id generated by this class to use in references.
+  std::map<GlobalValue::GUID, unsigned> GUIDToValueIdMap;
+
+  /// Tracks the last value id recorded in the GUIDToValueIdMap.
+  unsigned GlobalValueId = 0;
+
+public:
+  /// Constructs an IndexBitcodeWriter object for the given combined index,
+  /// writing to the provided \p Buffer. When writing a subset of the index
+  /// for a distributed backend, provide a \p ModuleToSummariesForIndex map.
+  IndexBitcodeWriter(BitstreamWriter &Stream, const ModuleSummaryIndex &Index,
+                     const std::map<std::string, GVSummaryMapTy>
+                         *ModuleToSummariesForIndex = nullptr)
+      : BitcodeWriterBase(Stream), Index(Index),
+        ModuleToSummariesForIndex(ModuleToSummariesForIndex) {
+    // Assign unique value ids to all summaries to be written, for use
+    // in writing out the call graph edges. Save the mapping from GUID
+    // to the new global value id to use when writing those edges, which
+    // are currently saved in the index in terms of GUID.
+    for (const auto &I : *this)
+      GUIDToValueIdMap[I.first] = ++GlobalValueId;
+  }
+
+  /// The below iterator returns the GUID and associated summary.
+  typedef std::pair<GlobalValue::GUID, GlobalValueSummary *> GVInfo;
+
+  /// Iterator over the value GUID and summaries to be written to bitcode,
+  /// hides the details of whether they are being pulled from the entire
+  /// index or just those in a provided ModuleToSummariesForIndex map.
+  class iterator
+      : public llvm::iterator_facade_base<iterator, std::forward_iterator_tag,
+                                          GVInfo> {
+    /// Enables access to parent class.
+    const IndexBitcodeWriter &Writer;
+
+    // Iterators used when writing only those summaries in a provided
+    // ModuleToSummariesForIndex map:
+
+    /// Points to the last element in outer ModuleToSummariesForIndex map.
+    std::map<std::string, GVSummaryMapTy>::const_iterator ModuleSummariesBack;
+    /// Iterator on outer ModuleToSummariesForIndex map.
+    std::map<std::string, GVSummaryMapTy>::const_iterator ModuleSummariesIter;
+    /// Iterator on an inner global variable summary map.
+    GVSummaryMapTy::const_iterator ModuleGVSummariesIter;
+
+    // Iterators used when writing all summaries in the index:
+
+    /// Points to the last element in the Index outer GlobalValueMap.
+    const_gvsummary_iterator IndexSummariesBack;
+    /// Iterator on outer GlobalValueMap.
+    const_gvsummary_iterator IndexSummariesIter;
+    /// Iterator on an inner GlobalValueSummaryList.
+    GlobalValueSummaryList::const_iterator IndexGVSummariesIter;
+
+  public:
+    /// Construct iterator from parent \p Writer and indicate if we are
+    /// constructing the end iterator.
+    iterator(const IndexBitcodeWriter &Writer, bool IsAtEnd) : Writer(Writer) {
+      // Set up the appropriate set of iterators given whether we are writing
+      // the full index or just a subset.
+      // Can't set up the Back or inner iterators if the corresponding map
+      // is empty. This will be handled specially in operator== as well.
+      if (Writer.ModuleToSummariesForIndex &&
+          !Writer.ModuleToSummariesForIndex->empty()) {
+        for (ModuleSummariesBack = Writer.ModuleToSummariesForIndex->begin();
+             std::next(ModuleSummariesBack) !=
+             Writer.ModuleToSummariesForIndex->end();
+             ModuleSummariesBack++)
+          ;
+        ModuleSummariesIter = !IsAtEnd
+                                  ? Writer.ModuleToSummariesForIndex->begin()
+                                  : ModuleSummariesBack;
+        ModuleGVSummariesIter = !IsAtEnd ? ModuleSummariesIter->second.begin()
+                                         : ModuleSummariesBack->second.end();
+      } else if (!Writer.ModuleToSummariesForIndex &&
+                 Writer.Index.begin() != Writer.Index.end()) {
+        for (IndexSummariesBack = Writer.Index.begin();
+             std::next(IndexSummariesBack) != Writer.Index.end();
+             IndexSummariesBack++)
+          ;
+        IndexSummariesIter =
+            !IsAtEnd ? Writer.Index.begin() : IndexSummariesBack;
+        IndexGVSummariesIter = !IsAtEnd ? IndexSummariesIter->second.begin()
+                                        : IndexSummariesBack->second.end();
+      }
+    }
+
+    /// Increment the appropriate set of iterators.
+    iterator &operator++() {
+      // First the inner iterator is incremented, then if it is at the end
+      // and there are more outer iterations to go, the inner is reset to
+      // the start of the next inner list.
+      if (Writer.ModuleToSummariesForIndex) {
+        ++ModuleGVSummariesIter;
+        if (ModuleGVSummariesIter == ModuleSummariesIter->second.end() &&
+            ModuleSummariesIter != ModuleSummariesBack) {
+          ++ModuleSummariesIter;
+          ModuleGVSummariesIter = ModuleSummariesIter->second.begin();
+        }
+      } else {
+        ++IndexGVSummariesIter;
+        if (IndexGVSummariesIter == IndexSummariesIter->second.end() &&
+            IndexSummariesIter != IndexSummariesBack) {
+          ++IndexSummariesIter;
+          IndexGVSummariesIter = IndexSummariesIter->second.begin();
+        }
+      }
+      return *this;
+    }
+
+    /// Access the <GUID,GlobalValueSummary*> pair corresponding to the current
+    /// outer and inner iterator positions.
+    GVInfo operator*() {
+      if (Writer.ModuleToSummariesForIndex)
+        return std::make_pair(ModuleGVSummariesIter->first,
+                              ModuleGVSummariesIter->second);
+      return std::make_pair(IndexSummariesIter->first,
+                            IndexGVSummariesIter->get());
+    }
+
+    /// Checks if the iterators are equal, with special handling for empty
+    /// indexes.
+    bool operator==(const iterator &RHS) const {
+      if (Writer.ModuleToSummariesForIndex) {
+        // First ensure that both are writing the same subset.
+        if (Writer.ModuleToSummariesForIndex !=
+            RHS.Writer.ModuleToSummariesForIndex)
+          return false;
+        // Already determined above that maps are the same, so if one is
+        // empty, they both are.
+        if (Writer.ModuleToSummariesForIndex->empty())
+          return true;
+        // Ensure the ModuleGVSummariesIter are iterating over the same
+        // container before checking them below.
+        if (ModuleSummariesIter != RHS.ModuleSummariesIter)
+          return false;
+        return ModuleGVSummariesIter == RHS.ModuleGVSummariesIter;
+      }
+      // First ensure RHS is also writing the full index, and that both are
+      // writing the same full index.
+      if (RHS.Writer.ModuleToSummariesForIndex ||
+          &Writer.Index != &RHS.Writer.Index)
+        return false;
+      // Already determined above that maps are the same, so if one is
+      // empty, they both are.
+      if (Writer.Index.begin() == Writer.Index.end())
+        return true;
+      // Ensure the IndexGVSummariesIter are iterating over the same
+      // container before checking them below.
+      if (IndexSummariesIter != RHS.IndexSummariesIter)
+        return false;
+      return IndexGVSummariesIter == RHS.IndexGVSummariesIter;
+    }
+  };
+
+  /// Obtain the start iterator over the summaries to be written.
+  iterator begin() { return iterator(*this, /*IsAtEnd=*/false); }
+  /// Obtain the end iterator over the summaries to be written.
+  iterator end() { return iterator(*this, /*IsAtEnd=*/true); }
+
+  /// Main entry point for writing a combined index to bitcode.
+  void write();
+
+private:
+  void writeIndex();
+  void writeModStrings();
+  void writeCombinedValueSymbolTable();
+  void writeCombinedGlobalValueSummary();
+
+  /// Indicates whether the provided \p ModulePath should be written into
+  /// the module string table, e.g. if the full index is being written or if
+  /// the module is in the provided subset.
+  bool doIncludeModule(StringRef ModulePath) {
+    return !ModuleToSummariesForIndex ||
+           ModuleToSummariesForIndex->count(ModulePath);
+  }
+
+  bool hasValueId(GlobalValue::GUID ValGUID) {
+    const auto &VMI = GUIDToValueIdMap.find(ValGUID);
+    return VMI != GUIDToValueIdMap.end();
+  }
+  unsigned getValueId(GlobalValue::GUID ValGUID) {
+    const auto &VMI = GUIDToValueIdMap.find(ValGUID);
+    // If this GUID doesn't have an entry, assign one.
+    if (VMI == GUIDToValueIdMap.end()) {
+      GUIDToValueIdMap[ValGUID] = ++GlobalValueId;
+      return GlobalValueId;
+    } else {
+      return VMI->second;
+    }
+  }
+  std::map<GlobalValue::GUID, unsigned> &valueIds() { return GUIDToValueIdMap; }
+};
+} // end anonymous namespace
+
+static unsigned getEncodedCastOpcode(unsigned Opcode) {
+  switch (Opcode) {
+  default: llvm_unreachable("Unknown cast instruction!");
+  case Instruction::Trunc   : return bitc::CAST_TRUNC;
+  case Instruction::ZExt    : return bitc::CAST_ZEXT;
+  case Instruction::SExt    : return bitc::CAST_SEXT;
+  case Instruction::FPToUI  : return bitc::CAST_FPTOUI;
+  case Instruction::FPToSI  : return bitc::CAST_FPTOSI;
+  case Instruction::UIToFP  : return bitc::CAST_UITOFP;
+  case Instruction::SIToFP  : return bitc::CAST_SITOFP;
+  case Instruction::FPTrunc : return bitc::CAST_FPTRUNC;
+  case Instruction::FPExt   : return bitc::CAST_FPEXT;
+  case Instruction::PtrToInt: return bitc::CAST_PTRTOINT;
+  case Instruction::IntToPtr: return bitc::CAST_INTTOPTR;
+  case Instruction::BitCast : return bitc::CAST_BITCAST;
+  case Instruction::AddrSpaceCast: return bitc::CAST_ADDRSPACECAST;
+  }
+}
+
+static unsigned getEncodedBinaryOpcode(unsigned Opcode) {
+  switch (Opcode) {
+  default: llvm_unreachable("Unknown binary instruction!");
+  case Instruction::Add:
+  case Instruction::FAdd: return bitc::BINOP_ADD;
+  case Instruction::Sub:
+  case Instruction::FSub: return bitc::BINOP_SUB;
+  case Instruction::Mul:
+  case Instruction::FMul: return bitc::BINOP_MUL;
+  case Instruction::UDiv: return bitc::BINOP_UDIV;
+  case Instruction::FDiv:
+  case Instruction::SDiv: return bitc::BINOP_SDIV;
+  case Instruction::URem: return bitc::BINOP_UREM;
+  case Instruction::FRem:
+  case Instruction::SRem: return bitc::BINOP_SREM;
+  case Instruction::Shl:  return bitc::BINOP_SHL;
+  case Instruction::LShr: return bitc::BINOP_LSHR;
+  case Instruction::AShr: return bitc::BINOP_ASHR;
+  case Instruction::And:  return bitc::BINOP_AND;
+  case Instruction::Or:   return bitc::BINOP_OR;
+  case Instruction::Xor:  return bitc::BINOP_XOR;
+  }
+}
+
+static unsigned getEncodedRMWOperation(AtomicRMWInst::BinOp Op) {
+  switch (Op) {
+  default: llvm_unreachable("Unknown RMW operation!");
+  case AtomicRMWInst::Xchg: return bitc::RMW_XCHG;
+  case AtomicRMWInst::Add: return bitc::RMW_ADD;
+  case AtomicRMWInst::Sub: return bitc::RMW_SUB;
+  case AtomicRMWInst::And: return bitc::RMW_AND;
+  case AtomicRMWInst::Nand: return bitc::RMW_NAND;
+  case AtomicRMWInst::Or: return bitc::RMW_OR;
+  case AtomicRMWInst::Xor: return bitc::RMW_XOR;
+  case AtomicRMWInst::Max: return bitc::RMW_MAX;
+  case AtomicRMWInst::Min: return bitc::RMW_MIN;
+  case AtomicRMWInst::UMax: return bitc::RMW_UMAX;
+  case AtomicRMWInst::UMin: return bitc::RMW_UMIN;
+  }
+}
+
+static unsigned getEncodedOrdering(AtomicOrdering Ordering) {
+  switch (Ordering) {
+  case AtomicOrdering::NotAtomic: return bitc::ORDERING_NOTATOMIC;
+  case AtomicOrdering::Unordered: return bitc::ORDERING_UNORDERED;
+  case AtomicOrdering::Monotonic: return bitc::ORDERING_MONOTONIC;
+  case AtomicOrdering::Acquire: return bitc::ORDERING_ACQUIRE;
+  case AtomicOrdering::Release: return bitc::ORDERING_RELEASE;
+  case AtomicOrdering::AcquireRelease: return bitc::ORDERING_ACQREL;
+  case AtomicOrdering::SequentiallyConsistent: return bitc::ORDERING_SEQCST;
+  }
+  llvm_unreachable("Invalid ordering");
+}
+
+static unsigned getEncodedSynchScope(SynchronizationScope SynchScope) {
+  switch (SynchScope) {
+  case SingleThread: return bitc::SYNCHSCOPE_SINGLETHREAD;
+  case CrossThread: return bitc::SYNCHSCOPE_CROSSTHREAD;
+  }
+  llvm_unreachable("Invalid synch scope");
+}
+
+static void writeStringRecord(BitstreamWriter &Stream, unsigned Code,
+                              StringRef Str, unsigned AbbrevToUse) {
+  SmallVector<unsigned, 64> Vals;
+
+  // Code: [strchar x N]
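+  // Fall back to an unabbreviated record (AbbrevToUse = 0) if any character
+  // cannot be encoded with Char6.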
+  for (unsigned i = 0, e = Str.size(); i != e; ++i) {
+    if (AbbrevToUse && !BitCodeAbbrevOp::isChar6(Str[i]))
+      AbbrevToUse = 0;
+    Vals.push_back(Str[i]);
+  }
+
+  // Emit the finished record.
+  Stream.EmitRecord(Code, Vals, AbbrevToUse);
+}
+
+static uint64_t getAttrKindEncoding(Attribute::AttrKind Kind) {
+  switch (Kind) {
+  case Attribute::Alignment:
+    return bitc::ATTR_KIND_ALIGNMENT;
+  case Attribute::AllocSize:
+    return bitc::ATTR_KIND_ALLOC_SIZE;
+  case Attribute::AlwaysInline:
+    return bitc::ATTR_KIND_ALWAYS_INLINE;
+  case Attribute::ArgMemOnly:
+    return bitc::ATTR_KIND_ARGMEMONLY;
+  case Attribute::Builtin:
+    return bitc::ATTR_KIND_BUILTIN;
+  case Attribute::ByVal:
+    return bitc::ATTR_KIND_BY_VAL;
+  case Attribute::Convergent:
+    return bitc::ATTR_KIND_CONVERGENT;
+  case Attribute::InAlloca:
+    return bitc::ATTR_KIND_IN_ALLOCA;
+  case Attribute::Cold:
+    return bitc::ATTR_KIND_COLD;
+  case Attribute::InaccessibleMemOnly:
+    return bitc::ATTR_KIND_INACCESSIBLEMEM_ONLY;
+  case Attribute::InaccessibleMemOrArgMemOnly:
+    return bitc::ATTR_KIND_INACCESSIBLEMEM_OR_ARGMEMONLY;
+  case Attribute::InlineHint:
+    return bitc::ATTR_KIND_INLINE_HINT;
+  case Attribute::InReg:
+    return bitc::ATTR_KIND_IN_REG;
+  case Attribute::JumpTable:
+    return bitc::ATTR_KIND_JUMP_TABLE;
+  case Attribute::MinSize:
+    return bitc::ATTR_KIND_MIN_SIZE;
+  case Attribute::Naked:
+    return bitc::ATTR_KIND_NAKED;
+  case Attribute::Nest:
+    return bitc::ATTR_KIND_NEST;
+  case Attribute::NoAlias:
+    return bitc::ATTR_KIND_NO_ALIAS;
+  case Attribute::NoBuiltin:
+    return bitc::ATTR_KIND_NO_BUILTIN;
+  case Attribute::NoCapture:
+    return bitc::ATTR_KIND_NO_CAPTURE;
+  case Attribute::NoDuplicate:
+    return bitc::ATTR_KIND_NO_DUPLICATE;
+  case Attribute::NoImplicitFloat:
+    return bitc::ATTR_KIND_NO_IMPLICIT_FLOAT;
+  case Attribute::NoInline:
+    return bitc::ATTR_KIND_NO_INLINE;
+  case Attribute::NoRecurse:
+    return bitc::ATTR_KIND_NO_RECURSE;
+  case Attribute::NonLazyBind:
+    return bitc::ATTR_KIND_NON_LAZY_BIND;
+  case Attribute::NonNull:
+    return bitc::ATTR_KIND_NON_NULL;
+  case Attribute::Dereferenceable:
+    return bitc::ATTR_KIND_DEREFERENCEABLE;
+  case Attribute::DereferenceableOrNull:
+    return bitc::ATTR_KIND_DEREFERENCEABLE_OR_NULL;
+  case Attribute::NoRedZone:
+    return bitc::ATTR_KIND_NO_RED_ZONE;
+  case Attribute::NoReturn:
+    return bitc::ATTR_KIND_NO_RETURN;
+  case Attribute::NoUnwind:
+    return bitc::ATTR_KIND_NO_UNWIND;
+  case Attribute::OptimizeForSize:
+    return bitc::ATTR_KIND_OPTIMIZE_FOR_SIZE;
+  case Attribute::OptimizeNone:
+    return bitc::ATTR_KIND_OPTIMIZE_NONE;
+  case Attribute::ReadNone:
+    return bitc::ATTR_KIND_READ_NONE;
+  case Attribute::ReadOnly:
+    return bitc::ATTR_KIND_READ_ONLY;
+  case Attribute::Returned:
+    return bitc::ATTR_KIND_RETURNED;
+  case Attribute::ReturnsTwice:
+    return bitc::ATTR_KIND_RETURNS_TWICE;
+  case Attribute::SExt:
+    return bitc::ATTR_KIND_S_EXT;
+  case Attribute::StackAlignment:
+    return bitc::ATTR_KIND_STACK_ALIGNMENT;
+  case Attribute::StackProtect:
+    return bitc::ATTR_KIND_STACK_PROTECT;
+  case Attribute::StackProtectReq:
+    return bitc::ATTR_KIND_STACK_PROTECT_REQ;
+  case Attribute::StackProtectStrong:
+    return bitc::ATTR_KIND_STACK_PROTECT_STRONG;
+  case Attribute::SafeStack:
+    return bitc::ATTR_KIND_SAFESTACK;
+  case Attribute::StructRet:
+    return bitc::ATTR_KIND_STRUCT_RET;
+  case Attribute::SanitizeAddress:
+    return bitc::ATTR_KIND_SANITIZE_ADDRESS;
+  case Attribute::SanitizeThread:
+    return bitc::ATTR_KIND_SANITIZE_THREAD;
+  case Attribute::SanitizeMemory:
+    return bitc::ATTR_KIND_SANITIZE_MEMORY;
+  case Attribute::SwiftError:
+    return bitc::ATTR_KIND_SWIFT_ERROR;
+  case Attribute::SwiftSelf:
+    return bitc::ATTR_KIND_SWIFT_SELF;
+  case Attribute::UWTable:
+    return bitc::ATTR_KIND_UW_TABLE;
+  case Attribute::WriteOnly:
+    return bitc::ATTR_KIND_WRITEONLY;
+  case Attribute::ZExt:
+    return bitc::ATTR_KIND_Z_EXT;
+
+  // VISC Attributes
+  case Attribute::In:
+    return bitc::ATTR_KIND_IN;
+  case Attribute::Out:
+    return bitc::ATTR_KIND_OUT;
+  case Attribute::InOut:
+    return bitc::ATTR_KIND_INOUT;
+
+  case Attribute::EndAttrKinds:
+    llvm_unreachable("Can not encode end-attribute kinds marker.");
+  case Attribute::None:
+    llvm_unreachable("Can not encode none-attribute.");
+  }
+
+  llvm_unreachable("Trying to encode unknown attribute");
+}
+
+void ModuleBitcodeWriter::writeAttributeGroupTable() {
+  const std::vector<AttributeSet> &AttrGrps = VE.getAttributeGroups();
+  if (AttrGrps.empty()) return;
+
+  Stream.EnterSubblock(bitc::PARAMATTR_GROUP_BLOCK_ID, 3);
+
+  SmallVector<uint64_t, 64> Record;
+  for (unsigned i = 0, e = AttrGrps.size(); i != e; ++i) {
+    AttributeSet AS = AttrGrps[i];
+    for (unsigned i = 0, e = AS.getNumSlots(); i != e; ++i) {
+      AttributeSet A = AS.getSlotAttributes(i);
+
+      Record.push_back(VE.getAttributeGroupID(A));
+      Record.push_back(AS.getSlotIndex(i));
+
+      for (AttributeSet::iterator I = AS.begin(0), E = AS.end(0);
+           I != E; ++I) {
+        Attribute Attr = *I;
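+        // Entry layout per attribute: 0 = enum kind; 1 = integer kind followed
+        // by its value; 3/4 = string kind (without/with a value), emitted as
+        // null-terminated bytes.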
+        if (Attr.isEnumAttribute()) {
+          Record.push_back(0);
+          Record.push_back(getAttrKindEncoding(Attr.getKindAsEnum()));
+        } else if (Attr.isIntAttribute()) {
+          Record.push_back(1);
+          Record.push_back(getAttrKindEncoding(Attr.getKindAsEnum()));
+          Record.push_back(Attr.getValueAsInt());
+        } else {
+          StringRef Kind = Attr.getKindAsString();
+          StringRef Val = Attr.getValueAsString();
+
+          Record.push_back(Val.empty() ? 3 : 4);
+          Record.append(Kind.begin(), Kind.end());
+          Record.push_back(0);
+          if (!Val.empty()) {
+            Record.append(Val.begin(), Val.end());
+            Record.push_back(0);
+          }
+        }
+      }
+
+      Stream.EmitRecord(bitc::PARAMATTR_GRP_CODE_ENTRY, Record);
+      Record.clear();
+    }
+  }
+
+  Stream.ExitBlock();
+}
+
+void ModuleBitcodeWriter::writeAttributeTable() {
+  const std::vector<AttributeSet> &Attrs = VE.getAttributes();
+  if (Attrs.empty()) return;
+
+  Stream.EnterSubblock(bitc::PARAMATTR_BLOCK_ID, 3);
+
+  SmallVector<uint64_t, 64> Record;
+  for (unsigned i = 0, e = Attrs.size(); i != e; ++i) {
+    const AttributeSet &A = Attrs[i];
+    for (unsigned i = 0, e = A.getNumSlots(); i != e; ++i)
+      Record.push_back(VE.getAttributeGroupID(A.getSlotAttributes(i)));
+
+    Stream.EmitRecord(bitc::PARAMATTR_CODE_ENTRY, Record);
+    Record.clear();
+  }
+
+  Stream.ExitBlock();
+}
+
+/// WriteTypeTable - Write out the type table for a module.
+void ModuleBitcodeWriter::writeTypeTable() {
+  const ValueEnumerator::TypeList &TypeList = VE.getTypes();
+
+  Stream.EnterSubblock(bitc::TYPE_BLOCK_ID_NEW, 4 /*count from # abbrevs */);
+  SmallVector<uint64_t, 64> TypeVals;
+
+  uint64_t NumBits = VE.computeBitsRequiredForTypeIndicies();
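+  // NumBits is the fixed bit width used below for type-index operands in the
+  // abbreviations.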
+
+  // Abbrev for TYPE_CODE_POINTER.
+  auto Abbv = std::make_shared<BitCodeAbbrev>();
+  Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_POINTER));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, NumBits));
+  Abbv->Add(BitCodeAbbrevOp(0));  // Addrspace = 0
+  unsigned PtrAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+
+  // Abbrev for TYPE_CODE_FUNCTION.
+  Abbv = std::make_shared<BitCodeAbbrev>();
+  Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_FUNCTION));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1));  // isvararg
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, NumBits));
+
+  unsigned FunctionAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+
+  // Abbrev for TYPE_CODE_STRUCT_ANON.
+  Abbv = std::make_shared<BitCodeAbbrev>();
+  Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_STRUCT_ANON));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1));  // ispacked
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, NumBits));
+
+  unsigned StructAnonAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+
+  // Abbrev for TYPE_CODE_STRUCT_NAME.
+  Abbv = std::make_shared<BitCodeAbbrev>();
+  Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_STRUCT_NAME));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Char6));
+  unsigned StructNameAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+
+  // Abbrev for TYPE_CODE_STRUCT_NAMED.
+  Abbv = std::make_shared<BitCodeAbbrev>();
+  Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_STRUCT_NAMED));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1));  // ispacked
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, NumBits));
+
+  unsigned StructNamedAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+
+  // Abbrev for TYPE_CODE_ARRAY.
+  Abbv = std::make_shared<BitCodeAbbrev>();
+  Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_ARRAY));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));   // size
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, NumBits));
+
+  unsigned ArrayAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+
+  // Emit an entry count so the reader can reserve space.
+  TypeVals.push_back(TypeList.size());
+  Stream.EmitRecord(bitc::TYPE_CODE_NUMENTRY, TypeVals);
+  TypeVals.clear();
+
+  // Loop over all of the types, emitting each in turn.
+  for (unsigned i = 0, e = TypeList.size(); i != e; ++i) {
+    Type *T = TypeList[i];
+    int AbbrevToUse = 0;
+    unsigned Code = 0;
+
+    switch (T->getTypeID()) {
+    case Type::VoidTyID:      Code = bitc::TYPE_CODE_VOID;      break;
+    case Type::HalfTyID:      Code = bitc::TYPE_CODE_HALF;      break;
+    case Type::FloatTyID:     Code = bitc::TYPE_CODE_FLOAT;     break;
+    case Type::DoubleTyID:    Code = bitc::TYPE_CODE_DOUBLE;    break;
+    case Type::X86_FP80TyID:  Code = bitc::TYPE_CODE_X86_FP80;  break;
+    case Type::FP128TyID:     Code = bitc::TYPE_CODE_FP128;     break;
+    case Type::PPC_FP128TyID: Code = bitc::TYPE_CODE_PPC_FP128; break;
+    case Type::LabelTyID:     Code = bitc::TYPE_CODE_LABEL;     break;
+    case Type::MetadataTyID:  Code = bitc::TYPE_CODE_METADATA;  break;
+    case Type::X86_MMXTyID:   Code = bitc::TYPE_CODE_X86_MMX;   break;
+    case Type::TokenTyID:     Code = bitc::TYPE_CODE_TOKEN;     break;
+    case Type::IntegerTyID:
+      // INTEGER: [width]
+      Code = bitc::TYPE_CODE_INTEGER;
+      TypeVals.push_back(cast<IntegerType>(T)->getBitWidth());
+      break;
+    case Type::PointerTyID: {
+      PointerType *PTy = cast<PointerType>(T);
+      // POINTER: [pointee type, address space]
+      Code = bitc::TYPE_CODE_POINTER;
+      TypeVals.push_back(VE.getTypeID(PTy->getElementType()));
+      unsigned AddressSpace = PTy->getAddressSpace();
+      TypeVals.push_back(AddressSpace);
+      if (AddressSpace == 0) AbbrevToUse = PtrAbbrev;
+      break;
+    }
+    case Type::FunctionTyID: {
+      FunctionType *FT = cast<FunctionType>(T);
+      // FUNCTION: [isvararg, retty, paramty x N]
+      Code = bitc::TYPE_CODE_FUNCTION;
+      TypeVals.push_back(FT->isVarArg());
+      TypeVals.push_back(VE.getTypeID(FT->getReturnType()));
+      for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i)
+        TypeVals.push_back(VE.getTypeID(FT->getParamType(i)));
+      AbbrevToUse = FunctionAbbrev;
+      break;
+    }
+    case Type::StructTyID: {
+      StructType *ST = cast<StructType>(T);
+      // STRUCT: [ispacked, eltty x N]
+      TypeVals.push_back(ST->isPacked());
+      // Output all of the element types.
+      for (StructType::element_iterator I = ST->element_begin(),
+           E = ST->element_end(); I != E; ++I)
+        TypeVals.push_back(VE.getTypeID(*I));
+
+      if (ST->isLiteral()) {
+        Code = bitc::TYPE_CODE_STRUCT_ANON;
+        AbbrevToUse = StructAnonAbbrev;
+      } else {
+        if (ST->isOpaque()) {
+          Code = bitc::TYPE_CODE_OPAQUE;
+        } else {
+          Code = bitc::TYPE_CODE_STRUCT_NAMED;
+          AbbrevToUse = StructNamedAbbrev;
+        }
+
+        // Emit the name if it is present.
+        if (!ST->getName().empty())
+          writeStringRecord(Stream, bitc::TYPE_CODE_STRUCT_NAME, ST->getName(),
+                            StructNameAbbrev);
+      }
+      break;
+    }
+    case Type::ArrayTyID: {
+      ArrayType *AT = cast<ArrayType>(T);
+      // ARRAY: [numelts, eltty]
+      Code = bitc::TYPE_CODE_ARRAY;
+      TypeVals.push_back(AT->getNumElements());
+      TypeVals.push_back(VE.getTypeID(AT->getElementType()));
+      AbbrevToUse = ArrayAbbrev;
+      break;
+    }
+    case Type::VectorTyID: {
+      VectorType *VT = cast<VectorType>(T);
+      // VECTOR [numelts, eltty]
+      Code = bitc::TYPE_CODE_VECTOR;
+      TypeVals.push_back(VT->getNumElements());
+      TypeVals.push_back(VE.getTypeID(VT->getElementType()));
+      break;
+    }
+    }
+
+    // Emit the finished record.
+    Stream.EmitRecord(Code, TypeVals, AbbrevToUse);
+    TypeVals.clear();
+  }
+
+  Stream.ExitBlock();
+}
+
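+// Note: these raw values must match the linkage decoding in the bitcode
+// reader; the gaps in the numbering are taken by older, now-deprecated
+// linkage encodings.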
+static unsigned getEncodedLinkage(const GlobalValue::LinkageTypes Linkage) {
+  switch (Linkage) {
+  case GlobalValue::ExternalLinkage:
+    return 0;
+  case GlobalValue::WeakAnyLinkage:
+    return 16;
+  case GlobalValue::AppendingLinkage:
+    return 2;
+  case GlobalValue::InternalLinkage:
+    return 3;
+  case GlobalValue::LinkOnceAnyLinkage:
+    return 18;
+  case GlobalValue::ExternalWeakLinkage:
+    return 7;
+  case GlobalValue::CommonLinkage:
+    return 8;
+  case GlobalValue::PrivateLinkage:
+    return 9;
+  case GlobalValue::WeakODRLinkage:
+    return 17;
+  case GlobalValue::LinkOnceODRLinkage:
+    return 19;
+  case GlobalValue::AvailableExternallyLinkage:
+    return 12;
+  }
+  llvm_unreachable("Invalid linkage");
+}
+
+static unsigned getEncodedLinkage(const GlobalValue &GV) {
+  return getEncodedLinkage(GV.getLinkage());
+}
+
+// Encode the flags for a GlobalValue in the summary.
+static uint64_t getEncodedGVSummaryFlags(GlobalValueSummary::GVFlags Flags) {
+  uint64_t RawFlags = 0;
+
+  RawFlags |= Flags.NotEligibleToImport; // bool
+  RawFlags |= (Flags.LiveRoot << 1);
+  // Linkage doesn't need to be remapped for the summary at this time. Any
+  // future change to getEncodedLinkage() will need to be taken into account
+  // here as well.
+  RawFlags = (RawFlags << 4) | Flags.Linkage; // 4 bits
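+  // Resulting layout: bits 0-3 = Linkage, bit 4 = NotEligibleToImport,
+  // bit 5 = LiveRoot.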
+
+  return RawFlags;
+}
+
+static unsigned getEncodedVisibility(const GlobalValue &GV) {
+  switch (GV.getVisibility()) {
+  case GlobalValue::DefaultVisibility:   return 0;
+  case GlobalValue::HiddenVisibility:    return 1;
+  case GlobalValue::ProtectedVisibility: return 2;
+  }
+  llvm_unreachable("Invalid visibility");
+}
+
+static unsigned getEncodedDLLStorageClass(const GlobalValue &GV) {
+  switch (GV.getDLLStorageClass()) {
+  case GlobalValue::DefaultStorageClass:   return 0;
+  case GlobalValue::DLLImportStorageClass: return 1;
+  case GlobalValue::DLLExportStorageClass: return 2;
+  }
+  llvm_unreachable("Invalid DLL storage class");
+}
+
+static unsigned getEncodedThreadLocalMode(const GlobalValue &GV) {
+  switch (GV.getThreadLocalMode()) {
+    case GlobalVariable::NotThreadLocal:         return 0;
+    case GlobalVariable::GeneralDynamicTLSModel: return 1;
+    case GlobalVariable::LocalDynamicTLSModel:   return 2;
+    case GlobalVariable::InitialExecTLSModel:    return 3;
+    case GlobalVariable::LocalExecTLSModel:      return 4;
+  }
+  llvm_unreachable("Invalid TLS model");
+}
+
+static unsigned getEncodedComdatSelectionKind(const Comdat &C) {
+  switch (C.getSelectionKind()) {
+  case Comdat::Any:
+    return bitc::COMDAT_SELECTION_KIND_ANY;
+  case Comdat::ExactMatch:
+    return bitc::COMDAT_SELECTION_KIND_EXACT_MATCH;
+  case Comdat::Largest:
+    return bitc::COMDAT_SELECTION_KIND_LARGEST;
+  case Comdat::NoDuplicates:
+    return bitc::COMDAT_SELECTION_KIND_NO_DUPLICATES;
+  case Comdat::SameSize:
+    return bitc::COMDAT_SELECTION_KIND_SAME_SIZE;
+  }
+  llvm_unreachable("Invalid selection kind");
+}
+
+static unsigned getEncodedUnnamedAddr(const GlobalValue &GV) {
+  switch (GV.getUnnamedAddr()) {
+  case GlobalValue::UnnamedAddr::None:   return 0;
+  case GlobalValue::UnnamedAddr::Local:  return 2;
+  case GlobalValue::UnnamedAddr::Global: return 1;
+  }
+  llvm_unreachable("Invalid unnamed_addr");
+}
+
+void ModuleBitcodeWriter::writeComdats() {
+  SmallVector<unsigned, 64> Vals;
+  for (const Comdat *C : VE.getComdats()) {
+    // COMDAT: [selection_kind, name]
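+    // The name is emitted as its length followed by the raw characters.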
+    Vals.push_back(getEncodedComdatSelectionKind(*C));
+    size_t Size = C->getName().size();
+    assert(isUInt<32>(Size));
+    Vals.push_back(Size);
+    for (char Chr : C->getName())
+      Vals.push_back((unsigned char)Chr);
+    Stream.EmitRecord(bitc::MODULE_CODE_COMDAT, Vals, /*AbbrevToUse=*/0);
+    Vals.clear();
+  }
+}
+
+/// Write a record that will eventually hold the word offset of the
+/// module-level VST. For now the offset is 0, which will be backpatched
+/// after the real VST is written. Saves the bit offset to backpatch.
+void BitcodeWriterBase::writeValueSymbolTableForwardDecl() {
+  // Write a placeholder value in for the offset of the real VST,
+  // which is written after the function blocks so that it can include
+  // the offset of each function. The placeholder offset will be
+  // updated when the real VST is written.
+  auto Abbv = std::make_shared<BitCodeAbbrev>();
+  Abbv->Add(BitCodeAbbrevOp(bitc::MODULE_CODE_VSTOFFSET));
+  // Blocks are 32-bit aligned, so we can use a 32-bit word offset to
+  // hold the real VST offset. Must use fixed instead of VBR as we don't
+  // know how many VBR chunks to reserve ahead of time.
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
+  unsigned VSTOffsetAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+
+  // Emit the placeholder
+  uint64_t Vals[] = {bitc::MODULE_CODE_VSTOFFSET, 0};
+  Stream.EmitRecordWithAbbrev(VSTOffsetAbbrev, Vals);
+
+  // Compute and save the bit offset to the placeholder, which will be
+  // patched when the real VST is written. We can simply subtract the 32-bit
+  // fixed size from the current bit number to get the location to backpatch.
+  VSTOffsetPlaceholder = Stream.GetCurrentBitNo() - 32;
+}
+
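+// SE_Char6: all characters fit the 6-bit Char6 set; SE_Fixed7: 7-bit ASCII;
+// SE_Fixed8: arbitrary 8-bit bytes.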
+enum StringEncoding { SE_Char6, SE_Fixed7, SE_Fixed8 };
+
+/// Determine the encoding to use for the given string name and length.
+static StringEncoding getStringEncoding(const char *Str, unsigned StrLen) {
+  bool isChar6 = true;
+  for (const char *C = Str, *E = C + StrLen; C != E; ++C) {
+    if (isChar6)
+      isChar6 = BitCodeAbbrevOp::isChar6(*C);
+    if ((unsigned char)*C & 128)
+      // don't bother scanning the rest.
+      return SE_Fixed8;
+  }
+  if (isChar6)
+    return SE_Char6;
+  else
+    return SE_Fixed7;
+}
+
+/// Emit the top-level description of the module, including the target triple,
+/// inline asm, descriptors for global variables, and function prototype info.
+/// Also emits the VSTOFFSET placeholder record (when the module has a value
+/// symbol table) that is later backpatched with the location of the real VST.
+void ModuleBitcodeWriter::writeModuleInfo() {
+  // Emit various pieces of data attached to a module.
+  if (!M.getTargetTriple().empty())
+    writeStringRecord(Stream, bitc::MODULE_CODE_TRIPLE, M.getTargetTriple(),
+                      0 /*TODO*/);
+  const std::string &DL = M.getDataLayoutStr();
+  if (!DL.empty())
+    writeStringRecord(Stream, bitc::MODULE_CODE_DATALAYOUT, DL, 0 /*TODO*/);
+  if (!M.getModuleInlineAsm().empty())
+    writeStringRecord(Stream, bitc::MODULE_CODE_ASM, M.getModuleInlineAsm(),
+                      0 /*TODO*/);
+
+  // Emit information about sections and GC, computing how many there are. Also
+  // compute the maximum alignment value.
+  std::map<std::string, unsigned> SectionMap;
+  std::map<std::string, unsigned> GCMap;
+  unsigned MaxAlignment = 0;
+  unsigned MaxGlobalType = 0;
+  for (const GlobalValue &GV : M.globals()) {
+    MaxAlignment = std::max(MaxAlignment, GV.getAlignment());
+    MaxGlobalType = std::max(MaxGlobalType, VE.getTypeID(GV.getValueType()));
+    if (GV.hasSection()) {
+      // Give section names unique IDs.
+      unsigned &Entry = SectionMap[GV.getSection()];
+      if (!Entry) {
+        writeStringRecord(Stream, bitc::MODULE_CODE_SECTIONNAME, GV.getSection(),
+                          0 /*TODO*/);
+        Entry = SectionMap.size();
+      }
+    }
+  }
+  for (const Function &F : M) {
+    MaxAlignment = std::max(MaxAlignment, F.getAlignment());
+    if (F.hasSection()) {
+      // Give section names unique IDs.
+      unsigned &Entry = SectionMap[F.getSection()];
+      if (!Entry) {
+        writeStringRecord(Stream, bitc::MODULE_CODE_SECTIONNAME, F.getSection(),
+                          0 /*TODO*/);
+        Entry = SectionMap.size();
+      }
+    }
+    if (F.hasGC()) {
+      // Same for GC names.
+      unsigned &Entry = GCMap[F.getGC()];
+      if (!Entry) {
+        writeStringRecord(Stream, bitc::MODULE_CODE_GCNAME, F.getGC(),
+                          0 /*TODO*/);
+        Entry = GCMap.size();
+      }
+    }
+  }
+
+  // Emit abbrev for globals, now that we know # sections and max alignment.
+  unsigned SimpleGVarAbbrev = 0;
+  if (!M.global_empty()) {
+    // Add an abbrev for common globals with no visibility or thread localness.
+    auto Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::MODULE_CODE_GLOBALVAR));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
+                              Log2_32_Ceil(MaxGlobalType+1)));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));   // AddrSpace << 2
+                                                           //| explicitType << 1
+                                                           //| constant
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));   // Initializer.
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 5)); // Linkage.
+    if (MaxAlignment == 0)                                 // Alignment.
+      Abbv->Add(BitCodeAbbrevOp(0));
+    else {
+      unsigned MaxEncAlignment = Log2_32(MaxAlignment)+1;
+      Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
+                               Log2_32_Ceil(MaxEncAlignment+1)));
+    }
+    if (SectionMap.empty())                                    // Section.
+      Abbv->Add(BitCodeAbbrevOp(0));
+    else
+      Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
+                               Log2_32_Ceil(SectionMap.size()+1)));
+    // Don't bother emitting vis + thread local.
+    SimpleGVarAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+  }
+
+  // Emit the global variable information.
+  SmallVector<unsigned, 64> Vals;
+  for (const GlobalVariable &GV : M.globals()) {
+    unsigned AbbrevToUse = 0;
+
+    // GLOBALVAR: [type, isconst, initid,
+    //             linkage, alignment, section, visibility, threadlocal,
+    //             unnamed_addr, externally_initialized, dllstorageclass,
+    //             comdat]
+    Vals.push_back(VE.getTypeID(GV.getValueType()));
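+    // The literal 2 below sets the explicitType bit in the packed field:
+    //   AddrSpace << 2 | explicitType << 1 | isConstant (see the abbrev above).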
+    Vals.push_back(GV.getType()->getAddressSpace() << 2 | 2 | GV.isConstant());
+    Vals.push_back(GV.isDeclaration() ? 0 :
+                   (VE.getValueID(GV.getInitializer()) + 1));
+    Vals.push_back(getEncodedLinkage(GV));
+    Vals.push_back(Log2_32(GV.getAlignment())+1);
+    Vals.push_back(GV.hasSection() ? SectionMap[GV.getSection()] : 0);
+    if (GV.isThreadLocal() ||
+        GV.getVisibility() != GlobalValue::DefaultVisibility ||
+        GV.getUnnamedAddr() != GlobalValue::UnnamedAddr::None ||
+        GV.isExternallyInitialized() ||
+        GV.getDLLStorageClass() != GlobalValue::DefaultStorageClass ||
+        GV.hasComdat()) {
+      Vals.push_back(getEncodedVisibility(GV));
+      Vals.push_back(getEncodedThreadLocalMode(GV));
+      Vals.push_back(getEncodedUnnamedAddr(GV));
+      Vals.push_back(GV.isExternallyInitialized());
+      Vals.push_back(getEncodedDLLStorageClass(GV));
+      Vals.push_back(GV.hasComdat() ? VE.getComdatID(GV.getComdat()) : 0);
+    } else {
+      AbbrevToUse = SimpleGVarAbbrev;
+    }
+
+    Stream.EmitRecord(bitc::MODULE_CODE_GLOBALVAR, Vals, AbbrevToUse);
+    Vals.clear();
+  }
+
+  // Emit the function proto information.
+  for (const Function &F : M) {
+    // FUNCTION:  [type, callingconv, isproto, linkage, paramattrs, alignment,
+    //             section, visibility, gc, unnamed_addr, prologuedata,
+    //             dllstorageclass, comdat, prefixdata, personalityfn]
+    Vals.push_back(VE.getTypeID(F.getFunctionType()));
+    Vals.push_back(F.getCallingConv());
+    Vals.push_back(F.isDeclaration());
+    Vals.push_back(getEncodedLinkage(F));
+    Vals.push_back(VE.getAttributeID(F.getAttributes()));
+    Vals.push_back(Log2_32(F.getAlignment())+1);
+    Vals.push_back(F.hasSection() ? SectionMap[F.getSection()] : 0);
+    Vals.push_back(getEncodedVisibility(F));
+    Vals.push_back(F.hasGC() ? GCMap[F.getGC()] : 0);
+    Vals.push_back(getEncodedUnnamedAddr(F));
+    Vals.push_back(F.hasPrologueData() ? (VE.getValueID(F.getPrologueData()) + 1)
+                                       : 0);
+    Vals.push_back(getEncodedDLLStorageClass(F));
+    Vals.push_back(F.hasComdat() ? VE.getComdatID(F.getComdat()) : 0);
+    Vals.push_back(F.hasPrefixData() ? (VE.getValueID(F.getPrefixData()) + 1)
+                                     : 0);
+    Vals.push_back(
+        F.hasPersonalityFn() ? (VE.getValueID(F.getPersonalityFn()) + 1) : 0);
+
+    unsigned AbbrevToUse = 0;
+    Stream.EmitRecord(bitc::MODULE_CODE_FUNCTION, Vals, AbbrevToUse);
+    Vals.clear();
+  }
+
+  // Emit the alias information.
+  for (const GlobalAlias &A : M.aliases()) {
+    // ALIAS: [alias type, aliasee val#, linkage, visibility, dllstorageclass,
+    //         threadlocal, unnamed_addr]
+    Vals.push_back(VE.getTypeID(A.getValueType()));
+    Vals.push_back(A.getType()->getAddressSpace());
+    Vals.push_back(VE.getValueID(A.getAliasee()));
+    Vals.push_back(getEncodedLinkage(A));
+    Vals.push_back(getEncodedVisibility(A));
+    Vals.push_back(getEncodedDLLStorageClass(A));
+    Vals.push_back(getEncodedThreadLocalMode(A));
+    Vals.push_back(getEncodedUnnamedAddr(A));
+    unsigned AbbrevToUse = 0;
+    Stream.EmitRecord(bitc::MODULE_CODE_ALIAS, Vals, AbbrevToUse);
+    Vals.clear();
+  }
+
+  // Emit the ifunc information.
+  for (const GlobalIFunc &I : M.ifuncs()) {
+    // IFUNC: [ifunc type, address space, resolver val#, linkage, visibility]
+    Vals.push_back(VE.getTypeID(I.getValueType()));
+    Vals.push_back(I.getType()->getAddressSpace());
+    Vals.push_back(VE.getValueID(I.getResolver()));
+    Vals.push_back(getEncodedLinkage(I));
+    Vals.push_back(getEncodedVisibility(I));
+    Stream.EmitRecord(bitc::MODULE_CODE_IFUNC, Vals);
+    Vals.clear();
+  }
+
+  // Emit the module's source file name.
+  {
+    StringEncoding Bits = getStringEncoding(M.getSourceFileName().data(),
+                                            M.getSourceFileName().size());
+    BitCodeAbbrevOp AbbrevOpToUse = BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8);
+    if (Bits == SE_Char6)
+      AbbrevOpToUse = BitCodeAbbrevOp(BitCodeAbbrevOp::Char6);
+    else if (Bits == SE_Fixed7)
+      AbbrevOpToUse = BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 7);
+
+    // MODULE_CODE_SOURCE_FILENAME: [namechar x N]
+    auto Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::MODULE_CODE_SOURCE_FILENAME));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+    Abbv->Add(AbbrevOpToUse);
+    unsigned FilenameAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+
+    for (const auto P : M.getSourceFileName())
+      Vals.push_back((unsigned char)P);
+
+    // Emit the finished record.
+    Stream.EmitRecord(bitc::MODULE_CODE_SOURCE_FILENAME, Vals, FilenameAbbrev);
+    Vals.clear();
+  }
+
+  // If we have a VST, write the VSTOFFSET record placeholder.
+  if (M.getValueSymbolTable().empty())
+    return;
+  writeValueSymbolTableForwardDecl();
+}
+
+static uint64_t getOptimizationFlags(const Value *V) {
+  uint64_t Flags = 0;
+
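+  // Pack the wrap flags (nsw/nuw), the exact flag, or the fast-math flags into
+  // the bit positions the bitcode reader expects for this operator.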
+  if (const auto *OBO = dyn_cast<OverflowingBinaryOperator>(V)) {
+    if (OBO->hasNoSignedWrap())
+      Flags |= 1 << bitc::OBO_NO_SIGNED_WRAP;
+    if (OBO->hasNoUnsignedWrap())
+      Flags |= 1 << bitc::OBO_NO_UNSIGNED_WRAP;
+  } else if (const auto *PEO = dyn_cast<PossiblyExactOperator>(V)) {
+    if (PEO->isExact())
+      Flags |= 1 << bitc::PEO_EXACT;
+  } else if (const auto *FPMO = dyn_cast<FPMathOperator>(V)) {
+    if (FPMO->hasUnsafeAlgebra())
+      Flags |= FastMathFlags::UnsafeAlgebra;
+    if (FPMO->hasNoNaNs())
+      Flags |= FastMathFlags::NoNaNs;
+    if (FPMO->hasNoInfs())
+      Flags |= FastMathFlags::NoInfs;
+    if (FPMO->hasNoSignedZeros())
+      Flags |= FastMathFlags::NoSignedZeros;
+    if (FPMO->hasAllowReciprocal())
+      Flags |= FastMathFlags::AllowReciprocal;
+  }
+
+  return Flags;
+}
+
+void ModuleBitcodeWriter::writeValueAsMetadata(
+    const ValueAsMetadata *MD, SmallVectorImpl<uint64_t> &Record) {
+  // Mimic an MDNode with a value as one operand.
+  Value *V = MD->getValue();
+  Record.push_back(VE.getTypeID(V->getType()));
+  Record.push_back(VE.getValueID(V));
+  Stream.EmitRecord(bitc::METADATA_VALUE, Record, 0);
+  Record.clear();
+}
+
+void ModuleBitcodeWriter::writeMDTuple(const MDTuple *N,
+                                       SmallVectorImpl<uint64_t> &Record,
+                                       unsigned Abbrev) {
+  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
+    Metadata *MD = N->getOperand(i);
+    assert(!(MD && isa<LocalAsMetadata>(MD)) &&
+           "Unexpected function-local metadata");
+    Record.push_back(VE.getMetadataOrNullID(MD));
+  }
+  Stream.EmitRecord(N->isDistinct() ? bitc::METADATA_DISTINCT_NODE
+                                    : bitc::METADATA_NODE,
+                    Record, Abbrev);
+  Record.clear();
+}
+
+unsigned ModuleBitcodeWriter::createDILocationAbbrev() {
+  // Assume the column is usually under 128, and always output the inlined-at
+  // location (it's never more expensive than building an array size 1).
+  auto Abbv = std::make_shared<BitCodeAbbrev>();
+  Abbv->Add(BitCodeAbbrevOp(bitc::METADATA_LOCATION));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
+  return Stream.EmitAbbrev(std::move(Abbv));
+}
+
+void ModuleBitcodeWriter::writeDILocation(const DILocation *N,
+                                          SmallVectorImpl<uint64_t> &Record,
+                                          unsigned &Abbrev) {
+  if (!Abbrev)
+    Abbrev = createDILocationAbbrev();
+
+  Record.push_back(N->isDistinct());
+  Record.push_back(N->getLine());
+  Record.push_back(N->getColumn());
+  Record.push_back(VE.getMetadataID(N->getScope()));
+  Record.push_back(VE.getMetadataOrNullID(N->getInlinedAt()));
+
+  Stream.EmitRecord(bitc::METADATA_LOCATION, Record, Abbrev);
+  Record.clear();
+}
+
+unsigned ModuleBitcodeWriter::createGenericDINodeAbbrev() {
+  // Assume the tag and version fields are small; the remaining operands are
+  // emitted as a VBR6-encoded array.
+  auto Abbv = std::make_shared<BitCodeAbbrev>();
+  Abbv->Add(BitCodeAbbrevOp(bitc::METADATA_GENERIC_DEBUG));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
+  return Stream.EmitAbbrev(std::move(Abbv));
+}
+
+void ModuleBitcodeWriter::writeGenericDINode(const GenericDINode *N,
+                                             SmallVectorImpl<uint64_t> &Record,
+                                             unsigned &Abbrev) {
+  if (!Abbrev)
+    Abbrev = createGenericDINodeAbbrev();
+
+  Record.push_back(N->isDistinct());
+  Record.push_back(N->getTag());
+  Record.push_back(0); // Per-tag version field; unused for now.
+
+  for (auto &I : N->operands())
+    Record.push_back(VE.getMetadataOrNullID(I));
+
+  Stream.EmitRecord(bitc::METADATA_GENERIC_DEBUG, Record, Abbrev);
+  Record.clear();
+}
+
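+// Zig-zag encode a signed value so small magnitudes stay small when emitted
+// unsigned: 0, -1, 1, -2, 2, ... map to 0, 1, 2, 3, 4, ...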
+static uint64_t rotateSign(int64_t I) {
+  uint64_t U = I;
+  return I < 0 ? ~(U << 1) : U << 1;
+}
+
+void ModuleBitcodeWriter::writeDISubrange(const DISubrange *N,
+                                          SmallVectorImpl<uint64_t> &Record,
+                                          unsigned Abbrev) {
+  Record.push_back(N->isDistinct());
+  Record.push_back(N->getCount());
+  Record.push_back(rotateSign(N->getLowerBound()));
+
+  Stream.EmitRecord(bitc::METADATA_SUBRANGE, Record, Abbrev);
+  Record.clear();
+}
+
+void ModuleBitcodeWriter::writeDIEnumerator(const DIEnumerator *N,
+                                            SmallVectorImpl<uint64_t> &Record,
+                                            unsigned Abbrev) {
+  Record.push_back(N->isDistinct());
+  Record.push_back(rotateSign(N->getValue()));
+  Record.push_back(VE.getMetadataOrNullID(N->getRawName()));
+
+  Stream.EmitRecord(bitc::METADATA_ENUMERATOR, Record, Abbrev);
+  Record.clear();
+}
+
+void ModuleBitcodeWriter::writeDIBasicType(const DIBasicType *N,
+                                           SmallVectorImpl<uint64_t> &Record,
+                                           unsigned Abbrev) {
+  Record.push_back(N->isDistinct());
+  Record.push_back(N->getTag());
+  Record.push_back(VE.getMetadataOrNullID(N->getRawName()));
+  Record.push_back(N->getSizeInBits());
+  Record.push_back(N->getAlignInBits());
+  Record.push_back(N->getEncoding());
+
+  Stream.EmitRecord(bitc::METADATA_BASIC_TYPE, Record, Abbrev);
+  Record.clear();
+}
+
+void ModuleBitcodeWriter::writeDIDerivedType(const DIDerivedType *N,
+                                             SmallVectorImpl<uint64_t> &Record,
+                                             unsigned Abbrev) {
+  Record.push_back(N->isDistinct());
+  Record.push_back(N->getTag());
+  Record.push_back(VE.getMetadataOrNullID(N->getRawName()));
+  Record.push_back(VE.getMetadataOrNullID(N->getFile()));
+  Record.push_back(N->getLine());
+  Record.push_back(VE.getMetadataOrNullID(N->getScope()));
+  Record.push_back(VE.getMetadataOrNullID(N->getBaseType()));
+  Record.push_back(N->getSizeInBits());
+  Record.push_back(N->getAlignInBits());
+  Record.push_back(N->getOffsetInBits());
+  Record.push_back(N->getFlags());
+  Record.push_back(VE.getMetadataOrNullID(N->getExtraData()));
+
+  Stream.EmitRecord(bitc::METADATA_DERIVED_TYPE, Record, Abbrev);
+  Record.clear();
+}
+
+void ModuleBitcodeWriter::writeDICompositeType(
+    const DICompositeType *N, SmallVectorImpl<uint64_t> &Record,
+    unsigned Abbrev) {
+  const unsigned IsNotUsedInOldTypeRef = 0x2;
+  Record.push_back(IsNotUsedInOldTypeRef | (unsigned)N->isDistinct());
+  Record.push_back(N->getTag());
+  Record.push_back(VE.getMetadataOrNullID(N->getRawName()));
+  Record.push_back(VE.getMetadataOrNullID(N->getFile()));
+  Record.push_back(N->getLine());
+  Record.push_back(VE.getMetadataOrNullID(N->getScope()));
+  Record.push_back(VE.getMetadataOrNullID(N->getBaseType()));
+  Record.push_back(N->getSizeInBits());
+  Record.push_back(N->getAlignInBits());
+  Record.push_back(N->getOffsetInBits());
+  Record.push_back(N->getFlags());
+  Record.push_back(VE.getMetadataOrNullID(N->getElements().get()));
+  Record.push_back(N->getRuntimeLang());
+  Record.push_back(VE.getMetadataOrNullID(N->getVTableHolder()));
+  Record.push_back(VE.getMetadataOrNullID(N->getTemplateParams().get()));
+  Record.push_back(VE.getMetadataOrNullID(N->getRawIdentifier()));
+
+  Stream.EmitRecord(bitc::METADATA_COMPOSITE_TYPE, Record, Abbrev);
+  Record.clear();
+}
+
+void ModuleBitcodeWriter::writeDISubroutineType(
+    const DISubroutineType *N, SmallVectorImpl<uint64_t> &Record,
+    unsigned Abbrev) {
+  const unsigned HasNoOldTypeRefs = 0x2;
+  Record.push_back(HasNoOldTypeRefs | (unsigned)N->isDistinct());
+  Record.push_back(N->getFlags());
+  Record.push_back(VE.getMetadataOrNullID(N->getTypeArray().get()));
+  Record.push_back(N->getCC());
+
+  Stream.EmitRecord(bitc::METADATA_SUBROUTINE_TYPE, Record, Abbrev);
+  Record.clear();
+}
+
+void ModuleBitcodeWriter::writeDIFile(const DIFile *N,
+                                      SmallVectorImpl<uint64_t> &Record,
+                                      unsigned Abbrev) {
+  Record.push_back(N->isDistinct());
+  Record.push_back(VE.getMetadataOrNullID(N->getRawFilename()));
+  Record.push_back(VE.getMetadataOrNullID(N->getRawDirectory()));
+  Record.push_back(N->getChecksumKind());
+  Record.push_back(VE.getMetadataOrNullID(N->getRawChecksum()));
+
+  Stream.EmitRecord(bitc::METADATA_FILE, Record, Abbrev);
+  Record.clear();
+}
+
+void ModuleBitcodeWriter::writeDICompileUnit(const DICompileUnit *N,
+                                             SmallVectorImpl<uint64_t> &Record,
+                                             unsigned Abbrev) {
+  assert(N->isDistinct() && "Expected distinct compile units");
+  Record.push_back(/* IsDistinct */ true);
+  Record.push_back(N->getSourceLanguage());
+  Record.push_back(VE.getMetadataOrNullID(N->getFile()));
+  Record.push_back(VE.getMetadataOrNullID(N->getRawProducer()));
+  Record.push_back(N->isOptimized());
+  Record.push_back(VE.getMetadataOrNullID(N->getRawFlags()));
+  Record.push_back(N->getRuntimeVersion());
+  Record.push_back(VE.getMetadataOrNullID(N->getRawSplitDebugFilename()));
+  Record.push_back(N->getEmissionKind());
+  Record.push_back(VE.getMetadataOrNullID(N->getEnumTypes().get()));
+  Record.push_back(VE.getMetadataOrNullID(N->getRetainedTypes().get()));
+  Record.push_back(/* subprograms */ 0);
+  Record.push_back(VE.getMetadataOrNullID(N->getGlobalVariables().get()));
+  Record.push_back(VE.getMetadataOrNullID(N->getImportedEntities().get()));
+  Record.push_back(N->getDWOId());
+  Record.push_back(VE.getMetadataOrNullID(N->getMacros().get()));
+  Record.push_back(N->getSplitDebugInlining());
+
+  Stream.EmitRecord(bitc::METADATA_COMPILE_UNIT, Record, Abbrev);
+  Record.clear();
+}
+
+void ModuleBitcodeWriter::writeDISubprogram(const DISubprogram *N,
+                                            SmallVectorImpl<uint64_t> &Record,
+                                            unsigned Abbrev) {
+  uint64_t HasUnitFlag = 1 << 1;
+  Record.push_back(N->isDistinct() | HasUnitFlag);
+  Record.push_back(VE.getMetadataOrNullID(N->getScope()));
+  Record.push_back(VE.getMetadataOrNullID(N->getRawName()));
+  Record.push_back(VE.getMetadataOrNullID(N->getRawLinkageName()));
+  Record.push_back(VE.getMetadataOrNullID(N->getFile()));
+  Record.push_back(N->getLine());
+  Record.push_back(VE.getMetadataOrNullID(N->getType()));
+  Record.push_back(N->isLocalToUnit());
+  Record.push_back(N->isDefinition());
+  Record.push_back(N->getScopeLine());
+  Record.push_back(VE.getMetadataOrNullID(N->getContainingType()));
+  Record.push_back(N->getVirtuality());
+  Record.push_back(N->getVirtualIndex());
+  Record.push_back(N->getFlags());
+  Record.push_back(N->isOptimized());
+  Record.push_back(VE.getMetadataOrNullID(N->getRawUnit()));
+  Record.push_back(VE.getMetadataOrNullID(N->getTemplateParams().get()));
+  Record.push_back(VE.getMetadataOrNullID(N->getDeclaration()));
+  Record.push_back(VE.getMetadataOrNullID(N->getVariables().get()));
+  Record.push_back(N->getThisAdjustment());
+
+  Stream.EmitRecord(bitc::METADATA_SUBPROGRAM, Record, Abbrev);
+  Record.clear();
+}
+
+void ModuleBitcodeWriter::writeDILexicalBlock(const DILexicalBlock *N,
+                                              SmallVectorImpl<uint64_t> &Record,
+                                              unsigned Abbrev) {
+  Record.push_back(N->isDistinct());
+  Record.push_back(VE.getMetadataOrNullID(N->getScope()));
+  Record.push_back(VE.getMetadataOrNullID(N->getFile()));
+  Record.push_back(N->getLine());
+  Record.push_back(N->getColumn());
+
+  Stream.EmitRecord(bitc::METADATA_LEXICAL_BLOCK, Record, Abbrev);
+  Record.clear();
+}
+
+void ModuleBitcodeWriter::writeDILexicalBlockFile(
+    const DILexicalBlockFile *N, SmallVectorImpl<uint64_t> &Record,
+    unsigned Abbrev) {
+  Record.push_back(N->isDistinct());
+  Record.push_back(VE.getMetadataOrNullID(N->getScope()));
+  Record.push_back(VE.getMetadataOrNullID(N->getFile()));
+  Record.push_back(N->getDiscriminator());
+
+  Stream.EmitRecord(bitc::METADATA_LEXICAL_BLOCK_FILE, Record, Abbrev);
+  Record.clear();
+}
+
+void ModuleBitcodeWriter::writeDINamespace(const DINamespace *N,
+                                           SmallVectorImpl<uint64_t> &Record,
+                                           unsigned Abbrev) {
+  Record.push_back(N->isDistinct() | N->getExportSymbols() << 1);
+  Record.push_back(VE.getMetadataOrNullID(N->getScope()));
+  Record.push_back(VE.getMetadataOrNullID(N->getFile()));
+  Record.push_back(VE.getMetadataOrNullID(N->getRawName()));
+  Record.push_back(N->getLine());
+
+  Stream.EmitRecord(bitc::METADATA_NAMESPACE, Record, Abbrev);
+  Record.clear();
+}
+
+void ModuleBitcodeWriter::writeDIMacro(const DIMacro *N,
+                                       SmallVectorImpl<uint64_t> &Record,
+                                       unsigned Abbrev) {
+  Record.push_back(N->isDistinct());
+  Record.push_back(N->getMacinfoType());
+  Record.push_back(N->getLine());
+  Record.push_back(VE.getMetadataOrNullID(N->getRawName()));
+  Record.push_back(VE.getMetadataOrNullID(N->getRawValue()));
+
+  Stream.EmitRecord(bitc::METADATA_MACRO, Record, Abbrev);
+  Record.clear();
+}
+
+void ModuleBitcodeWriter::writeDIMacroFile(const DIMacroFile *N,
+                                           SmallVectorImpl<uint64_t> &Record,
+                                           unsigned Abbrev) {
+  Record.push_back(N->isDistinct());
+  Record.push_back(N->getMacinfoType());
+  Record.push_back(N->getLine());
+  Record.push_back(VE.getMetadataOrNullID(N->getFile()));
+  Record.push_back(VE.getMetadataOrNullID(N->getElements().get()));
+
+  Stream.EmitRecord(bitc::METADATA_MACRO_FILE, Record, Abbrev);
+  Record.clear();
+}
+
+void ModuleBitcodeWriter::writeDIModule(const DIModule *N,
+                                        SmallVectorImpl<uint64_t> &Record,
+                                        unsigned Abbrev) {
+  Record.push_back(N->isDistinct());
+  for (auto &I : N->operands())
+    Record.push_back(VE.getMetadataOrNullID(I));
+
+  Stream.EmitRecord(bitc::METADATA_MODULE, Record, Abbrev);
+  Record.clear();
+}
+
+void ModuleBitcodeWriter::writeDITemplateTypeParameter(
+    const DITemplateTypeParameter *N, SmallVectorImpl<uint64_t> &Record,
+    unsigned Abbrev) {
+  Record.push_back(N->isDistinct());
+  Record.push_back(VE.getMetadataOrNullID(N->getRawName()));
+  Record.push_back(VE.getMetadataOrNullID(N->getType()));
+
+  Stream.EmitRecord(bitc::METADATA_TEMPLATE_TYPE, Record, Abbrev);
+  Record.clear();
+}
+
+void ModuleBitcodeWriter::writeDITemplateValueParameter(
+    const DITemplateValueParameter *N, SmallVectorImpl<uint64_t> &Record,
+    unsigned Abbrev) {
+  Record.push_back(N->isDistinct());
+  Record.push_back(N->getTag());
+  Record.push_back(VE.getMetadataOrNullID(N->getRawName()));
+  Record.push_back(VE.getMetadataOrNullID(N->getType()));
+  Record.push_back(VE.getMetadataOrNullID(N->getValue()));
+
+  Stream.EmitRecord(bitc::METADATA_TEMPLATE_VALUE, Record, Abbrev);
+  Record.clear();
+}
+
+void ModuleBitcodeWriter::writeDIGlobalVariable(
+    const DIGlobalVariable *N, SmallVectorImpl<uint64_t> &Record,
+    unsigned Abbrev) {
+  const uint64_t Version = 1 << 1;
+  Record.push_back((uint64_t)N->isDistinct() | Version);
+  Record.push_back(VE.getMetadataOrNullID(N->getScope()));
+  Record.push_back(VE.getMetadataOrNullID(N->getRawName()));
+  Record.push_back(VE.getMetadataOrNullID(N->getRawLinkageName()));
+  Record.push_back(VE.getMetadataOrNullID(N->getFile()));
+  Record.push_back(N->getLine());
+  Record.push_back(VE.getMetadataOrNullID(N->getType()));
+  Record.push_back(N->isLocalToUnit());
+  Record.push_back(N->isDefinition());
+  Record.push_back(/* expr */ 0);
+  Record.push_back(VE.getMetadataOrNullID(N->getStaticDataMemberDeclaration()));
+  Record.push_back(N->getAlignInBits());
+
+  Stream.EmitRecord(bitc::METADATA_GLOBAL_VAR, Record, Abbrev);
+  Record.clear();
+}
+
+void ModuleBitcodeWriter::writeDILocalVariable(
+    const DILocalVariable *N, SmallVectorImpl<uint64_t> &Record,
+    unsigned Abbrev) {
+  // In order to support all possible bitcode formats in BitcodeReader we need
+  // to distinguish the following cases:
+  // 1) Record has no artificial tag (Record[1]),
+  //   has no obsolete inlinedAt field (Record[9]).
+  //   In this case Record size will be 8, HasAlignment flag is false.
+  // 2) Record has artificial tag (Record[1]),
+  //   has no obsolete inlinedAt field (Record[9]).
+  //   In this case Record size will be 9, HasAlignment flag is false.
+  // 3) Record has both artificial tag (Record[1]) and
+  //   obsolete inlinedAt field (Record[9]).
+  //   In this case Record size will be 10, HasAlignment flag is false.
+  // 4) Record has neither an artificial tag nor an inlinedAt field, but
+  //   HasAlignment flag is true and Record[8] contains the alignment value.
+  const uint64_t HasAlignmentFlag = 1 << 1;
+  Record.push_back((uint64_t)N->isDistinct() | HasAlignmentFlag);
+  Record.push_back(VE.getMetadataOrNullID(N->getScope()));
+  Record.push_back(VE.getMetadataOrNullID(N->getRawName()));
+  Record.push_back(VE.getMetadataOrNullID(N->getFile()));
+  Record.push_back(N->getLine());
+  Record.push_back(VE.getMetadataOrNullID(N->getType()));
+  Record.push_back(N->getArg());
+  Record.push_back(N->getFlags());
+  Record.push_back(N->getAlignInBits());
+
+  Stream.EmitRecord(bitc::METADATA_LOCAL_VAR, Record, Abbrev);
+  Record.clear();
+}
+
+void ModuleBitcodeWriter::writeDIExpression(const DIExpression *N,
+                                            SmallVectorImpl<uint64_t> &Record,
+                                            unsigned Abbrev) {
+  Record.reserve(N->getElements().size() + 1);
+
+  const uint64_t HasOpFragmentFlag = 1 << 1;
+  Record.push_back((uint64_t)N->isDistinct() | HasOpFragmentFlag);
+  Record.append(N->elements_begin(), N->elements_end());
+
+  Stream.EmitRecord(bitc::METADATA_EXPRESSION, Record, Abbrev);
+  Record.clear();
+}
+
+void ModuleBitcodeWriter::writeDIGlobalVariableExpression(
+    const DIGlobalVariableExpression *N, SmallVectorImpl<uint64_t> &Record,
+    unsigned Abbrev) {
+  Record.push_back(N->isDistinct());
+  Record.push_back(VE.getMetadataOrNullID(N->getVariable()));
+  Record.push_back(VE.getMetadataOrNullID(N->getExpression()));
+
+  Stream.EmitRecord(bitc::METADATA_GLOBAL_VAR_EXPR, Record, Abbrev);
+  Record.clear();
+}
+
+void ModuleBitcodeWriter::writeDIObjCProperty(const DIObjCProperty *N,
+                                              SmallVectorImpl<uint64_t> &Record,
+                                              unsigned Abbrev) {
+  Record.push_back(N->isDistinct());
+  Record.push_back(VE.getMetadataOrNullID(N->getRawName()));
+  Record.push_back(VE.getMetadataOrNullID(N->getFile()));
+  Record.push_back(N->getLine());
+  Record.push_back(VE.getMetadataOrNullID(N->getRawSetterName()));
+  Record.push_back(VE.getMetadataOrNullID(N->getRawGetterName()));
+  Record.push_back(N->getAttributes());
+  Record.push_back(VE.getMetadataOrNullID(N->getType()));
+
+  Stream.EmitRecord(bitc::METADATA_OBJC_PROPERTY, Record, Abbrev);
+  Record.clear();
+}
+
+void ModuleBitcodeWriter::writeDIImportedEntity(
+    const DIImportedEntity *N, SmallVectorImpl<uint64_t> &Record,
+    unsigned Abbrev) {
+  Record.push_back(N->isDistinct());
+  Record.push_back(N->getTag());
+  Record.push_back(VE.getMetadataOrNullID(N->getScope()));
+  Record.push_back(VE.getMetadataOrNullID(N->getEntity()));
+  Record.push_back(N->getLine());
+  Record.push_back(VE.getMetadataOrNullID(N->getRawName()));
+
+  Stream.EmitRecord(bitc::METADATA_IMPORTED_ENTITY, Record, Abbrev);
+  Record.clear();
+}
+
+unsigned ModuleBitcodeWriter::createNamedMetadataAbbrev() {
+  auto Abbv = std::make_shared<BitCodeAbbrev>();
+  Abbv->Add(BitCodeAbbrevOp(bitc::METADATA_NAME));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8));
+  return Stream.EmitAbbrev(std::move(Abbv));
+}
+
+void ModuleBitcodeWriter::writeNamedMetadata(
+    SmallVectorImpl<uint64_t> &Record) {
+  if (M.named_metadata_empty())
+    return;
+
+  unsigned Abbrev = createNamedMetadataAbbrev();
+  for (const NamedMDNode &NMD : M.named_metadata()) {
+    // Write name.
+    StringRef Str = NMD.getName();
+    Record.append(Str.bytes_begin(), Str.bytes_end());
+    Stream.EmitRecord(bitc::METADATA_NAME, Record, Abbrev);
+    Record.clear();
+
+    // Write named metadata operands.
+    for (const MDNode *N : NMD.operands())
+      Record.push_back(VE.getMetadataID(N));
+    Stream.EmitRecord(bitc::METADATA_NAMED_NODE, Record, 0);
+    Record.clear();
+  }
+}
+
+unsigned ModuleBitcodeWriter::createMetadataStringsAbbrev() {
+  auto Abbv = std::make_shared<BitCodeAbbrev>();
+  Abbv->Add(BitCodeAbbrevOp(bitc::METADATA_STRINGS));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // # of strings
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // offset to chars
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+  return Stream.EmitAbbrev(std::move(Abbv));
+}
+
+/// Write out a record for MDString.
+///
+/// All the metadata strings in a metadata block are emitted in a single
+/// record.  The sizes and strings themselves are shoved into a blob.
+void ModuleBitcodeWriter::writeMetadataStrings(
+    ArrayRef<const Metadata *> Strings, SmallVectorImpl<uint64_t> &Record) {
+  if (Strings.empty())
+    return;
+
+  // The record starts with the record code, followed by the number of strings.
+  Record.push_back(bitc::METADATA_STRINGS);
+  Record.push_back(Strings.size());
+
+  // Emit the sizes of the strings in the blob.
+  SmallString<256> Blob;
+  {
+    BitstreamWriter W(Blob);
+    for (const Metadata *MD : Strings)
+      W.EmitVBR(cast<MDString>(MD)->getLength(), 6);
+    W.FlushToWord();
+  }
+
+  // Add the offset to the strings to the record.
+  Record.push_back(Blob.size());
+
+  // Add the strings to the blob.
+  for (const Metadata *MD : Strings)
+    Blob.append(cast<MDString>(MD)->getString());
+
+  // Emit the final record.
+  Stream.EmitRecordWithBlob(createMetadataStringsAbbrev(), Record, Blob);
+  Record.clear();
+}
+
+// Generates an enum to use as an index into the abbreviation array for
+// metadata records.
+enum MetadataAbbrev : unsigned {
+#define HANDLE_MDNODE_LEAF(CLASS) CLASS##AbbrevID,
+#include "llvm/IR/Metadata.def"
+  LastPlusOne
+};
+
+void ModuleBitcodeWriter::writeMetadataRecords(
+    ArrayRef<const Metadata *> MDs, SmallVectorImpl<uint64_t> &Record,
+    std::vector<unsigned> *MDAbbrevs, std::vector<uint64_t> *IndexPos) {
+  if (MDs.empty())
+    return;
+
+  // Initialize MDNode abbreviations.
+#define HANDLE_MDNODE_LEAF(CLASS) unsigned CLASS##Abbrev = 0;
+#include "llvm/IR/Metadata.def"
+
+  for (const Metadata *MD : MDs) {
+    if (IndexPos)
+      IndexPos->push_back(Stream.GetCurrentBitNo());
+    if (const MDNode *N = dyn_cast<MDNode>(MD)) {
+      assert(N->isResolved() && "Expected forward references to be resolved");
+
+      switch (N->getMetadataID()) {
+      default:
+        llvm_unreachable("Invalid MDNode subclass");
+#define HANDLE_MDNODE_LEAF(CLASS)                                              \
+  case Metadata::CLASS##Kind:                                                  \
+    if (MDAbbrevs)                                                             \
+      write##CLASS(cast<CLASS>(N), Record,                                     \
+                   (*MDAbbrevs)[MetadataAbbrev::CLASS##AbbrevID]);             \
+    else                                                                       \
+      write##CLASS(cast<CLASS>(N), Record, CLASS##Abbrev);                     \
+    continue;
+#include "llvm/IR/Metadata.def"
+      }
+    }
+    writeValueAsMetadata(cast<ValueAsMetadata>(MD), Record);
+  }
+}
+
+void ModuleBitcodeWriter::writeModuleMetadata() {
+  if (!VE.hasMDs() && M.named_metadata_empty())
+    return;
+
+  Stream.EnterSubblock(bitc::METADATA_BLOCK_ID, 4);
+  SmallVector<uint64_t, 64> Record;
+
+  // Emit all abbrevs upfront, so that the reader can jump in the middle of the
+  // block and load any metadata.
+  std::vector<unsigned> MDAbbrevs;
+
+  MDAbbrevs.resize(MetadataAbbrev::LastPlusOne);
+  MDAbbrevs[MetadataAbbrev::DILocationAbbrevID] = createDILocationAbbrev();
+  MDAbbrevs[MetadataAbbrev::GenericDINodeAbbrevID] =
+      createGenericDINodeAbbrev();
+
+  auto Abbv = std::make_shared<BitCodeAbbrev>();
+  Abbv->Add(BitCodeAbbrevOp(bitc::METADATA_INDEX_OFFSET));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
+  unsigned OffsetAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+
+  Abbv = std::make_shared<BitCodeAbbrev>();
+  Abbv->Add(BitCodeAbbrevOp(bitc::METADATA_INDEX));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
+  unsigned IndexAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+
+  // Emit MDStrings together upfront.
+  writeMetadataStrings(VE.getMDStrings(), Record);
+
+  // Only emit an index for the metadata records if we have more than a given
+  // (naive) threshold of metadata nodes; otherwise it is not worth it.
+  if (VE.getNonMDStrings().size() > IndexThreshold) {
+    // Write a placeholder value in for the offset of the metadata index,
+    // which is written after the records, so that it can include
+    // the offset of each entry. The placeholder offset will be
+    // updated after all records are emitted.
+    uint64_t Vals[] = {0, 0};
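+    // The two fixed-32 fields form the 64-bit slot that BackpatchWord64 fills
+    // in once the index has been emitted.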
+    Stream.EmitRecord(bitc::METADATA_INDEX_OFFSET, Vals, OffsetAbbrev);
+  }
+
+  // Compute and save the bit offset to the current position, which will be
+  // patched when we emit the index later. We can simply subtract the 64-bit
+  // fixed size from the current bit number to get the location to backpatch.
+  uint64_t IndexOffsetRecordBitPos = Stream.GetCurrentBitNo();
+
+  // This index will contain the bitpos for each individual record.
+  std::vector<uint64_t> IndexPos;
+  IndexPos.reserve(VE.getNonMDStrings().size());
+
+  // Write all the records
+  writeMetadataRecords(VE.getNonMDStrings(), Record, &MDAbbrevs, &IndexPos);
+
+  if (VE.getNonMDStrings().size() > IndexThreshold) {
+    // Now that we have emitted all the records we will emit the index. But
+    // first, backpatch the forward reference so that the reader can skip the
+    // records efficiently.
+    Stream.BackpatchWord64(IndexOffsetRecordBitPos - 64,
+                           Stream.GetCurrentBitNo() - IndexOffsetRecordBitPos);
+
+    // Delta encode the index.
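+    // Each entry becomes the bit distance from the previous record, which
+    // keeps the values small for the VBR6 array abbreviation defined above.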
+    uint64_t PreviousValue = IndexOffsetRecordBitPos;
+    for (auto &Elt : IndexPos) {
+      auto EltDelta = Elt - PreviousValue;
+      PreviousValue = Elt;
+      Elt = EltDelta;
+    }
+    // Emit the index record.
+    Stream.EmitRecord(bitc::METADATA_INDEX, IndexPos, IndexAbbrev);
+    IndexPos.clear();
+  }
+
+  // Write the named metadata now.
+  writeNamedMetadata(Record);
+
+  auto AddDeclAttachedMetadata = [&](const GlobalObject &GO) {
+    SmallVector<uint64_t, 4> Record;
+    Record.push_back(VE.getValueID(&GO));
+    pushGlobalMetadataAttachment(Record, GO);
+    Stream.EmitRecord(bitc::METADATA_GLOBAL_DECL_ATTACHMENT, Record);
+  };
+  for (const Function &F : M)
+    if (F.isDeclaration() && F.hasMetadata())
+      AddDeclAttachedMetadata(F);
+  // FIXME: Only store metadata for declarations here, and move data for global
+  // variable definitions to a separate block (PR28134).
+  for (const GlobalVariable &GV : M.globals())
+    if (GV.hasMetadata())
+      AddDeclAttachedMetadata(GV);
+
+  Stream.ExitBlock();
+}
+
+void ModuleBitcodeWriter::writeFunctionMetadata(const Function &F) {
+  if (!VE.hasMDs())
+    return;
+
+  Stream.EnterSubblock(bitc::METADATA_BLOCK_ID, 3);
+  SmallVector<uint64_t, 64> Record;
+  writeMetadataStrings(VE.getMDStrings(), Record);
+  writeMetadataRecords(VE.getNonMDStrings(), Record);
+  Stream.ExitBlock();
+}
+
+void ModuleBitcodeWriter::pushGlobalMetadataAttachment(
+    SmallVectorImpl<uint64_t> &Record, const GlobalObject &GO) {
+  // [n x [id, mdnode]]
+  SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
+  GO.getAllMetadata(MDs);
+  for (const auto &I : MDs) {
+    Record.push_back(I.first);
+    Record.push_back(VE.getMetadataID(I.second));
+  }
+}
+
+void ModuleBitcodeWriter::writeFunctionMetadataAttachment(const Function &F) {
+  Stream.EnterSubblock(bitc::METADATA_ATTACHMENT_ID, 3);
+
+  SmallVector<uint64_t, 64> Record;
+
+  if (F.hasMetadata()) {
+    pushGlobalMetadataAttachment(Record, F);
+    Stream.EmitRecord(bitc::METADATA_ATTACHMENT, Record, 0);
+    Record.clear();
+  }
+
+  // Write metadata attachments
+  // METADATA_ATTACHMENT - [m x [value, [n x [id, mdnode]]]]
+  SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
+  for (const BasicBlock &BB : F)
+    for (const Instruction &I : BB) {
+      MDs.clear();
+      I.getAllMetadataOtherThanDebugLoc(MDs);
+
+      // If no metadata, ignore instruction.
+      if (MDs.empty()) continue;
+
+      Record.push_back(VE.getInstructionID(&I));
+
+      for (unsigned i = 0, e = MDs.size(); i != e; ++i) {
+        Record.push_back(MDs[i].first);
+        Record.push_back(VE.getMetadataID(MDs[i].second));
+      }
+      Stream.EmitRecord(bitc::METADATA_ATTACHMENT, Record, 0);
+      Record.clear();
+    }
+
+  Stream.ExitBlock();
+}
+
+void ModuleBitcodeWriter::writeModuleMetadataKinds() {
+  SmallVector<uint64_t, 64> Record;
+
+  // Write metadata kinds
+  // METADATA_KIND - [n x [id, name]]
+  SmallVector<StringRef, 8> Names;
+  M.getMDKindNames(Names);
+
+  if (Names.empty()) return;
+
+  Stream.EnterSubblock(bitc::METADATA_KIND_BLOCK_ID, 3);
+
+  for (unsigned MDKindID = 0, e = Names.size(); MDKindID != e; ++MDKindID) {
+    Record.push_back(MDKindID);
+    StringRef KName = Names[MDKindID];
+    Record.append(KName.begin(), KName.end());
+
+    Stream.EmitRecord(bitc::METADATA_KIND, Record, 0);
+    Record.clear();
+  }
+
+  Stream.ExitBlock();
+}
+
+void ModuleBitcodeWriter::writeOperandBundleTags() {
+  // Write operand bundle tags.
+  //
+  // OPERAND_BUNDLE_TAGS_BLOCK_ID : N x OPERAND_BUNDLE_TAG
+  //
+  // OPERAND_BUNDLE_TAG - [strchr x N]
+
+  SmallVector<StringRef, 8> Tags;
+  M.getOperandBundleTags(Tags);
+
+  if (Tags.empty())
+    return;
+
+  Stream.EnterSubblock(bitc::OPERAND_BUNDLE_TAGS_BLOCK_ID, 3);
+
+  SmallVector<uint64_t, 64> Record;
+
+  for (auto Tag : Tags) {
+    Record.append(Tag.begin(), Tag.end());
+
+    Stream.EmitRecord(bitc::OPERAND_BUNDLE_TAG, Record, 0);
+    Record.clear();
+  }
+
+  Stream.ExitBlock();
+}
+
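+// Emit a signed 64-bit value with the sign folded into the low bit:
+// non-negative V is stored as V << 1, negative V as ((-V) << 1) | 1, so
+// small magnitudes of either sign stay cheap under VBR encoding.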
+static void emitSignedInt64(SmallVectorImpl<uint64_t> &Vals, uint64_t V) {
+  if ((int64_t)V >= 0)
+    Vals.push_back(V << 1);
+  else
+    Vals.push_back((-V << 1) | 1);
+}
+
+void ModuleBitcodeWriter::writeConstants(unsigned FirstVal, unsigned LastVal,
+                                         bool isGlobal) {
+  if (FirstVal == LastVal) return;
+
+  Stream.EnterSubblock(bitc::CONSTANTS_BLOCK_ID, 4);
+
+  unsigned AggregateAbbrev = 0;
+  unsigned String8Abbrev = 0;
+  unsigned CString7Abbrev = 0;
+  unsigned CString6Abbrev = 0;
+  // If this is a constant pool for the module, emit module-specific abbrevs.
+  if (isGlobal) {
+    // Abbrev for CST_CODE_AGGREGATE.
+    auto Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_AGGREGATE));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, Log2_32_Ceil(LastVal+1)));
+    AggregateAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+
+    // Abbrev for CST_CODE_STRING.
+    Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_STRING));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8));
+    String8Abbrev = Stream.EmitAbbrev(std::move(Abbv));
+    // Abbrev for CST_CODE_CSTRING.
+    Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_CSTRING));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 7));
+    CString7Abbrev = Stream.EmitAbbrev(std::move(Abbv));
+    // Abbrev for CST_CODE_CSTRING.
+    Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_CSTRING));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Char6));
+    CString6Abbrev = Stream.EmitAbbrev(std::move(Abbv));
+  }
+
+  SmallVector<uint64_t, 64> Record;
+
+  const ValueEnumerator::ValueList &Vals = VE.getValues();
+  Type *LastTy = nullptr;
+  for (unsigned i = FirstVal; i != LastVal; ++i) {
+    const Value *V = Vals[i].first;
+    // If we need to switch types, do so now.
+    if (V->getType() != LastTy) {
+      LastTy = V->getType();
+      Record.push_back(VE.getTypeID(LastTy));
+      Stream.EmitRecord(bitc::CST_CODE_SETTYPE, Record,
+                        CONSTANTS_SETTYPE_ABBREV);
+      Record.clear();
+    }
+
+    if (const InlineAsm *IA = dyn_cast<InlineAsm>(V)) {
+      Record.push_back(unsigned(IA->hasSideEffects()) |
+                       unsigned(IA->isAlignStack()) << 1 |
+                       unsigned(IA->getDialect()&1) << 2);
+
+      // Add the asm string.
+      const std::string &AsmStr = IA->getAsmString();
+      Record.push_back(AsmStr.size());
+      Record.append(AsmStr.begin(), AsmStr.end());
+
+      // Add the constraint string.
+      const std::string &ConstraintStr = IA->getConstraintString();
+      Record.push_back(ConstraintStr.size());
+      Record.append(ConstraintStr.begin(), ConstraintStr.end());
+      Stream.EmitRecord(bitc::CST_CODE_INLINEASM, Record);
+      Record.clear();
+      continue;
+    }
+    const Constant *C = cast<Constant>(V);
+    unsigned Code = -1U;
+    unsigned AbbrevToUse = 0;
+    if (C->isNullValue()) {
+      Code = bitc::CST_CODE_NULL;
+    } else if (isa<UndefValue>(C)) {
+      Code = bitc::CST_CODE_UNDEF;
+    } else if (const ConstantInt *IV = dyn_cast<ConstantInt>(C)) {
+      if (IV->getBitWidth() <= 64) {
+        uint64_t V = IV->getSExtValue();
+        emitSignedInt64(Record, V);
+        Code = bitc::CST_CODE_INTEGER;
+        AbbrevToUse = CONSTANTS_INTEGER_ABBREV;
+      } else {                             // Wide integers, > 64 bits in size.
+        // We have an arbitrary precision integer value to write whose
+        // bit width is > 64. However, in canonical unsigned integer
+        // format it is likely that the high bits are going to be zero.
+        // So, we only write the number of active words.
+        unsigned NWords = IV->getValue().getActiveWords();
+        const uint64_t *RawWords = IV->getValue().getRawData();
+        for (unsigned i = 0; i != NWords; ++i) {
+          emitSignedInt64(Record, RawWords[i]);
+        }
+        Code = bitc::CST_CODE_WIDE_INTEGER;
+      }
+    } else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
+      Code = bitc::CST_CODE_FLOAT;
+      Type *Ty = CFP->getType();
+      if (Ty->isHalfTy() || Ty->isFloatTy() || Ty->isDoubleTy()) {
+        Record.push_back(CFP->getValueAPF().bitcastToAPInt().getZExtValue());
+      } else if (Ty->isX86_FP80Ty()) {
+        // Keep the APInt in a local so its raw data is not destroyed early.
+        // The bits are not in the same order as a normal i80 APInt; compensate.
+        APInt api = CFP->getValueAPF().bitcastToAPInt();
+        const uint64_t *p = api.getRawData();
+        Record.push_back((p[1] << 48) | (p[0] >> 16));
+        Record.push_back(p[0] & 0xffffLL);
+      } else if (Ty->isFP128Ty() || Ty->isPPC_FP128Ty()) {
+        APInt api = CFP->getValueAPF().bitcastToAPInt();
+        const uint64_t *p = api.getRawData();
+        Record.push_back(p[0]);
+        Record.push_back(p[1]);
+      } else {
+        assert (0 && "Unknown FP type!");
+      }
+    } else if (isa<ConstantDataSequential>(C) &&
+               cast<ConstantDataSequential>(C)->isString()) {
+      const ConstantDataSequential *Str = cast<ConstantDataSequential>(C);
+      // Emit constant strings specially.
+      unsigned NumElts = Str->getNumElements();
+      // If this is a null-terminated string, use the denser CSTRING encoding.
+      if (Str->isCString()) {
+        Code = bitc::CST_CODE_CSTRING;
+        --NumElts;  // Don't encode the null, which isn't allowed by char6.
+      } else {
+        Code = bitc::CST_CODE_STRING;
+        AbbrevToUse = String8Abbrev;
+      }
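+      // Track whether every character fits in 7 bits or in the char6
+      // alphabet so the densest abbreviation can be selected afterwards.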
+      bool isCStr7 = Code == bitc::CST_CODE_CSTRING;
+      bool isCStrChar6 = Code == bitc::CST_CODE_CSTRING;
+      for (unsigned i = 0; i != NumElts; ++i) {
+        unsigned char V = Str->getElementAsInteger(i);
+        Record.push_back(V);
+        isCStr7 &= (V & 128) == 0;
+        if (isCStrChar6)
+          isCStrChar6 = BitCodeAbbrevOp::isChar6(V);
+      }
+
+      if (isCStrChar6)
+        AbbrevToUse = CString6Abbrev;
+      else if (isCStr7)
+        AbbrevToUse = CString7Abbrev;
+    } else if (const ConstantDataSequential *CDS =
+                  dyn_cast<ConstantDataSequential>(C)) {
+      Code = bitc::CST_CODE_DATA;
+      Type *EltTy = CDS->getType()->getElementType();
+      if (isa<IntegerType>(EltTy)) {
+        for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i)
+          Record.push_back(CDS->getElementAsInteger(i));
+      } else {
+        for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i)
+          Record.push_back(
+              CDS->getElementAsAPFloat(i).bitcastToAPInt().getLimitedValue());
+      }
+    } else if (isa<ConstantAggregate>(C)) {
+      Code = bitc::CST_CODE_AGGREGATE;
+      for (const Value *Op : C->operands())
+        Record.push_back(VE.getValueID(Op));
+      AbbrevToUse = AggregateAbbrev;
+    } else if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
+      switch (CE->getOpcode()) {
+      default:
+        if (Instruction::isCast(CE->getOpcode())) {
+          Code = bitc::CST_CODE_CE_CAST;
+          Record.push_back(getEncodedCastOpcode(CE->getOpcode()));
+          Record.push_back(VE.getTypeID(C->getOperand(0)->getType()));
+          Record.push_back(VE.getValueID(C->getOperand(0)));
+          AbbrevToUse = CONSTANTS_CE_CAST_Abbrev;
+        } else {
+          assert(CE->getNumOperands() == 2 && "Unknown constant expr!");
+          Code = bitc::CST_CODE_CE_BINOP;
+          Record.push_back(getEncodedBinaryOpcode(CE->getOpcode()));
+          Record.push_back(VE.getValueID(C->getOperand(0)));
+          Record.push_back(VE.getValueID(C->getOperand(1)));
+          uint64_t Flags = getOptimizationFlags(CE);
+          if (Flags != 0)
+            Record.push_back(Flags);
+        }
+        break;
+      case Instruction::GetElementPtr: {
+        Code = bitc::CST_CODE_CE_GEP;
+        const auto *GO = cast<GEPOperator>(C);
+        Record.push_back(VE.getTypeID(GO->getSourceElementType()));
+        if (Optional<unsigned> Idx = GO->getInRangeIndex()) {
+          Code = bitc::CST_CODE_CE_GEP_WITH_INRANGE_INDEX;
+          Record.push_back((*Idx << 1) | GO->isInBounds());
+        } else if (GO->isInBounds())
+          Code = bitc::CST_CODE_CE_INBOUNDS_GEP;
+        for (unsigned i = 0, e = CE->getNumOperands(); i != e; ++i) {
+          Record.push_back(VE.getTypeID(C->getOperand(i)->getType()));
+          Record.push_back(VE.getValueID(C->getOperand(i)));
+        }
+        break;
+      }
+      case Instruction::Select:
+        Code = bitc::CST_CODE_CE_SELECT;
+        Record.push_back(VE.getValueID(C->getOperand(0)));
+        Record.push_back(VE.getValueID(C->getOperand(1)));
+        Record.push_back(VE.getValueID(C->getOperand(2)));
+        break;
+      case Instruction::ExtractElement:
+        Code = bitc::CST_CODE_CE_EXTRACTELT;
+        Record.push_back(VE.getTypeID(C->getOperand(0)->getType()));
+        Record.push_back(VE.getValueID(C->getOperand(0)));
+        Record.push_back(VE.getTypeID(C->getOperand(1)->getType()));
+        Record.push_back(VE.getValueID(C->getOperand(1)));
+        break;
+      case Instruction::InsertElement:
+        Code = bitc::CST_CODE_CE_INSERTELT;
+        Record.push_back(VE.getValueID(C->getOperand(0)));
+        Record.push_back(VE.getValueID(C->getOperand(1)));
+        Record.push_back(VE.getTypeID(C->getOperand(2)->getType()));
+        Record.push_back(VE.getValueID(C->getOperand(2)));
+        break;
+      case Instruction::ShuffleVector:
+        // If the return type and argument types are the same, this is a
+        // standard shufflevector instruction.  If the types are different,
+        // then the shuffle is widening or truncating the input vectors, and
+        // the argument type must also be encoded.
+        if (C->getType() == C->getOperand(0)->getType()) {
+          Code = bitc::CST_CODE_CE_SHUFFLEVEC;
+        } else {
+          Code = bitc::CST_CODE_CE_SHUFVEC_EX;
+          Record.push_back(VE.getTypeID(C->getOperand(0)->getType()));
+        }
+        Record.push_back(VE.getValueID(C->getOperand(0)));
+        Record.push_back(VE.getValueID(C->getOperand(1)));
+        Record.push_back(VE.getValueID(C->getOperand(2)));
+        break;
+      case Instruction::ICmp:
+      case Instruction::FCmp:
+        Code = bitc::CST_CODE_CE_CMP;
+        Record.push_back(VE.getTypeID(C->getOperand(0)->getType()));
+        Record.push_back(VE.getValueID(C->getOperand(0)));
+        Record.push_back(VE.getValueID(C->getOperand(1)));
+        Record.push_back(CE->getPredicate());
+        break;
+      }
+    } else if (const BlockAddress *BA = dyn_cast<BlockAddress>(C)) {
+      Code = bitc::CST_CODE_BLOCKADDRESS;
+      Record.push_back(VE.getTypeID(BA->getFunction()->getType()));
+      Record.push_back(VE.getValueID(BA->getFunction()));
+      Record.push_back(VE.getGlobalBasicBlockID(BA->getBasicBlock()));
+    } else {
+#ifndef NDEBUG
+      C->dump();
+#endif
+      llvm_unreachable("Unknown constant!");
+    }
+    Stream.EmitRecord(Code, Record, AbbrevToUse);
+    Record.clear();
+  }
+
+  Stream.ExitBlock();
+}
+
+void ModuleBitcodeWriter::writeModuleConstants() {
+  const ValueEnumerator::ValueList &Vals = VE.getValues();
+
+  // Find the first constant to emit, which is the first non-globalvalue value.
+  // We know globalvalues have been emitted by WriteModuleInfo.
+  for (unsigned i = 0, e = Vals.size(); i != e; ++i) {
+    if (!isa<GlobalValue>(Vals[i].first)) {
+      writeConstants(i, Vals.size(), true);
+      return;
+    }
+  }
+}
+
+/// pushValueAndType - The file has to encode both the value and type id for
+/// many values, because we need to know what type to create for forward
+/// references.  However, most operands are not forward references, so this type
+/// field is not needed.
+///
+/// This function adds V's value ID to Vals.  If the value ID is higher than the
+/// instruction ID, then it is a forward reference, and it also includes the
+/// type ID.  The value ID that is written is encoded relative to the InstID.
+bool ModuleBitcodeWriter::pushValueAndType(const Value *V, unsigned InstID,
+                                           SmallVectorImpl<unsigned> &Vals) {
+  unsigned ValID = VE.getValueID(V);
+  // Make encoding relative to the InstID.
+  Vals.push_back(InstID - ValID);
+  if (ValID >= InstID) {
+    Vals.push_back(VE.getTypeID(V->getType()));
+    return true;
+  }
+  return false;
+}
+
+void ModuleBitcodeWriter::writeOperandBundles(ImmutableCallSite CS,
+                                              unsigned InstID) {
+  SmallVector<unsigned, 64> Record;
+  LLVMContext &C = CS.getInstruction()->getContext();
+
+  for (unsigned i = 0, e = CS.getNumOperandBundles(); i != e; ++i) {
+    const auto &Bundle = CS.getOperandBundleAt(i);
+    Record.push_back(C.getOperandBundleTagID(Bundle.getTagName()));
+
+    for (auto &Input : Bundle.Inputs)
+      pushValueAndType(Input, InstID, Record);
+
+    Stream.EmitRecord(bitc::FUNC_CODE_OPERAND_BUNDLE, Record);
+    Record.clear();
+  }
+}
+
+/// pushValue - Like pushValueAndType, but where the type of the value is
+/// omitted (perhaps it was already encoded in an earlier operand).
+void ModuleBitcodeWriter::pushValue(const Value *V, unsigned InstID,
+                                    SmallVectorImpl<unsigned> &Vals) {
+  unsigned ValID = VE.getValueID(V);
+  Vals.push_back(InstID - ValID);
+}
+
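+// Like pushValue, but emits the relative ID via emitSignedInt64 so that
+// forward references (which produce a negative delta) can be encoded.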
+void ModuleBitcodeWriter::pushValueSigned(const Value *V, unsigned InstID,
+                                          SmallVectorImpl<uint64_t> &Vals) {
+  unsigned ValID = VE.getValueID(V);
+  int64_t diff = ((int32_t)InstID - (int32_t)ValID);
+  emitSignedInt64(Vals, diff);
+}
+
+/// writeInstruction - Emit an instruction to the specified stream.
+void ModuleBitcodeWriter::writeInstruction(const Instruction &I,
+                                           unsigned InstID,
+                                           SmallVectorImpl<unsigned> &Vals) {
+  unsigned Code = 0;
+  unsigned AbbrevToUse = 0;
+  VE.setInstructionID(&I);
+  switch (I.getOpcode()) {
+  default:
+    if (Instruction::isCast(I.getOpcode())) {
+      Code = bitc::FUNC_CODE_INST_CAST;
+      if (!pushValueAndType(I.getOperand(0), InstID, Vals))
+        AbbrevToUse = FUNCTION_INST_CAST_ABBREV;
+      Vals.push_back(VE.getTypeID(I.getType()));
+      Vals.push_back(getEncodedCastOpcode(I.getOpcode()));
+    } else {
+      assert(isa<BinaryOperator>(I) && "Unknown instruction!");
+      Code = bitc::FUNC_CODE_INST_BINOP;
+      if (!pushValueAndType(I.getOperand(0), InstID, Vals))
+        AbbrevToUse = FUNCTION_INST_BINOP_ABBREV;
+      pushValue(I.getOperand(1), InstID, Vals);
+      Vals.push_back(getEncodedBinaryOpcode(I.getOpcode()));
+      uint64_t Flags = getOptimizationFlags(&I);
+      if (Flags != 0) {
+        if (AbbrevToUse == FUNCTION_INST_BINOP_ABBREV)
+          AbbrevToUse = FUNCTION_INST_BINOP_FLAGS_ABBREV;
+        Vals.push_back(Flags);
+      }
+    }
+    break;
+
+  case Instruction::GetElementPtr: {
+    Code = bitc::FUNC_CODE_INST_GEP;
+    AbbrevToUse = FUNCTION_INST_GEP_ABBREV;
+    auto &GEPInst = cast<GetElementPtrInst>(I);
+    Vals.push_back(GEPInst.isInBounds());
+    Vals.push_back(VE.getTypeID(GEPInst.getSourceElementType()));
+    for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
+      pushValueAndType(I.getOperand(i), InstID, Vals);
+    break;
+  }
+  case Instruction::ExtractValue: {
+    Code = bitc::FUNC_CODE_INST_EXTRACTVAL;
+    pushValueAndType(I.getOperand(0), InstID, Vals);
+    const ExtractValueInst *EVI = cast<ExtractValueInst>(&I);
+    Vals.append(EVI->idx_begin(), EVI->idx_end());
+    break;
+  }
+  case Instruction::InsertValue: {
+    Code = bitc::FUNC_CODE_INST_INSERTVAL;
+    pushValueAndType(I.getOperand(0), InstID, Vals);
+    pushValueAndType(I.getOperand(1), InstID, Vals);
+    const InsertValueInst *IVI = cast<InsertValueInst>(&I);
+    Vals.append(IVI->idx_begin(), IVI->idx_end());
+    break;
+  }
+  case Instruction::Select:
+    Code = bitc::FUNC_CODE_INST_VSELECT;
+    pushValueAndType(I.getOperand(1), InstID, Vals);
+    pushValue(I.getOperand(2), InstID, Vals);
+    pushValueAndType(I.getOperand(0), InstID, Vals);
+    break;
+  case Instruction::ExtractElement:
+    Code = bitc::FUNC_CODE_INST_EXTRACTELT;
+    pushValueAndType(I.getOperand(0), InstID, Vals);
+    pushValueAndType(I.getOperand(1), InstID, Vals);
+    break;
+  case Instruction::InsertElement:
+    Code = bitc::FUNC_CODE_INST_INSERTELT;
+    pushValueAndType(I.getOperand(0), InstID, Vals);
+    pushValue(I.getOperand(1), InstID, Vals);
+    pushValueAndType(I.getOperand(2), InstID, Vals);
+    break;
+  case Instruction::ShuffleVector:
+    Code = bitc::FUNC_CODE_INST_SHUFFLEVEC;
+    pushValueAndType(I.getOperand(0), InstID, Vals);
+    pushValue(I.getOperand(1), InstID, Vals);
+    pushValue(I.getOperand(2), InstID, Vals);
+    break;
+  case Instruction::ICmp:
+  case Instruction::FCmp: {
+    // compare returning Int1Ty or vector of Int1Ty
+    Code = bitc::FUNC_CODE_INST_CMP2;
+    pushValueAndType(I.getOperand(0), InstID, Vals);
+    pushValue(I.getOperand(1), InstID, Vals);
+    Vals.push_back(cast<CmpInst>(I).getPredicate());
+    uint64_t Flags = getOptimizationFlags(&I);
+    if (Flags != 0)
+      Vals.push_back(Flags);
+    break;
+  }
+
+  case Instruction::Ret:
+    {
+      Code = bitc::FUNC_CODE_INST_RET;
+      unsigned NumOperands = I.getNumOperands();
+      if (NumOperands == 0)
+        AbbrevToUse = FUNCTION_INST_RET_VOID_ABBREV;
+      else if (NumOperands == 1) {
+        if (!pushValueAndType(I.getOperand(0), InstID, Vals))
+          AbbrevToUse = FUNCTION_INST_RET_VAL_ABBREV;
+      } else {
+        for (unsigned i = 0, e = NumOperands; i != e; ++i)
+          pushValueAndType(I.getOperand(i), InstID, Vals);
+      }
+    }
+    break;
+  case Instruction::Br:
+    {
+      Code = bitc::FUNC_CODE_INST_BR;
+      const BranchInst &II = cast<BranchInst>(I);
+      Vals.push_back(VE.getValueID(II.getSuccessor(0)));
+      if (II.isConditional()) {
+        Vals.push_back(VE.getValueID(II.getSuccessor(1)));
+        pushValue(II.getCondition(), InstID, Vals);
+      }
+    }
+    break;
+  case Instruction::Switch:
+    {
+      Code = bitc::FUNC_CODE_INST_SWITCH;
+      const SwitchInst &SI = cast<SwitchInst>(I);
+      Vals.push_back(VE.getTypeID(SI.getCondition()->getType()));
+      pushValue(SI.getCondition(), InstID, Vals);
+      Vals.push_back(VE.getValueID(SI.getDefaultDest()));
+      for (SwitchInst::ConstCaseIt Case : SI.cases()) {
+        Vals.push_back(VE.getValueID(Case.getCaseValue()));
+        Vals.push_back(VE.getValueID(Case.getCaseSuccessor()));
+      }
+    }
+    break;
+  case Instruction::IndirectBr:
+    Code = bitc::FUNC_CODE_INST_INDIRECTBR;
+    Vals.push_back(VE.getTypeID(I.getOperand(0)->getType()));
+    // Encode the address operand as relative, but not the basic blocks.
+    pushValue(I.getOperand(0), InstID, Vals);
+    for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i)
+      Vals.push_back(VE.getValueID(I.getOperand(i)));
+    break;
+
+  case Instruction::Invoke: {
+    const InvokeInst *II = cast<InvokeInst>(&I);
+    const Value *Callee = II->getCalledValue();
+    FunctionType *FTy = II->getFunctionType();
+
+    if (II->hasOperandBundles())
+      writeOperandBundles(II, InstID);
+
+    Code = bitc::FUNC_CODE_INST_INVOKE;
+
+    Vals.push_back(VE.getAttributeID(II->getAttributes()));
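+    // Bit 13 of the calling-convention word marks that the callee's function
+    // type is recorded explicitly below.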
+    Vals.push_back(II->getCallingConv() | 1 << 13);
+    Vals.push_back(VE.getValueID(II->getNormalDest()));
+    Vals.push_back(VE.getValueID(II->getUnwindDest()));
+    Vals.push_back(VE.getTypeID(FTy));
+    pushValueAndType(Callee, InstID, Vals);
+
+    // Emit value #'s for the fixed parameters.
+    for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
+      pushValue(I.getOperand(i), InstID, Vals); // fixed param.
+
+    // Emit type/value pairs for varargs params.
+    if (FTy->isVarArg()) {
+      for (unsigned i = FTy->getNumParams(), e = II->getNumArgOperands();
+           i != e; ++i)
+        pushValueAndType(I.getOperand(i), InstID, Vals); // vararg
+    }
+    break;
+  }
+  case Instruction::Resume:
+    Code = bitc::FUNC_CODE_INST_RESUME;
+    pushValueAndType(I.getOperand(0), InstID, Vals);
+    break;
+  case Instruction::CleanupRet: {
+    Code = bitc::FUNC_CODE_INST_CLEANUPRET;
+    const auto &CRI = cast<CleanupReturnInst>(I);
+    pushValue(CRI.getCleanupPad(), InstID, Vals);
+    if (CRI.hasUnwindDest())
+      Vals.push_back(VE.getValueID(CRI.getUnwindDest()));
+    break;
+  }
+  case Instruction::CatchRet: {
+    Code = bitc::FUNC_CODE_INST_CATCHRET;
+    const auto &CRI = cast<CatchReturnInst>(I);
+    pushValue(CRI.getCatchPad(), InstID, Vals);
+    Vals.push_back(VE.getValueID(CRI.getSuccessor()));
+    break;
+  }
+  case Instruction::CleanupPad:
+  case Instruction::CatchPad: {
+    const auto &FuncletPad = cast<FuncletPadInst>(I);
+    Code = isa<CatchPadInst>(FuncletPad) ? bitc::FUNC_CODE_INST_CATCHPAD
+                                         : bitc::FUNC_CODE_INST_CLEANUPPAD;
+    pushValue(FuncletPad.getParentPad(), InstID, Vals);
+
+    unsigned NumArgOperands = FuncletPad.getNumArgOperands();
+    Vals.push_back(NumArgOperands);
+    for (unsigned Op = 0; Op != NumArgOperands; ++Op)
+      pushValueAndType(FuncletPad.getArgOperand(Op), InstID, Vals);
+    break;
+  }
+  case Instruction::CatchSwitch: {
+    Code = bitc::FUNC_CODE_INST_CATCHSWITCH;
+    const auto &CatchSwitch = cast<CatchSwitchInst>(I);
+
+    pushValue(CatchSwitch.getParentPad(), InstID, Vals);
+
+    unsigned NumHandlers = CatchSwitch.getNumHandlers();
+    Vals.push_back(NumHandlers);
+    for (const BasicBlock *CatchPadBB : CatchSwitch.handlers())
+      Vals.push_back(VE.getValueID(CatchPadBB));
+
+    if (CatchSwitch.hasUnwindDest())
+      Vals.push_back(VE.getValueID(CatchSwitch.getUnwindDest()));
+    break;
+  }
+  case Instruction::Unreachable:
+    Code = bitc::FUNC_CODE_INST_UNREACHABLE;
+    AbbrevToUse = FUNCTION_INST_UNREACHABLE_ABBREV;
+    break;
+
+  case Instruction::PHI: {
+    const PHINode &PN = cast<PHINode>(I);
+    Code = bitc::FUNC_CODE_INST_PHI;
+    // With the newer instruction encoding, forward references could give
+    // negative valued IDs.  This is most common for PHIs, so we use
+    // signed VBRs.
+    SmallVector<uint64_t, 128> Vals64;
+    Vals64.push_back(VE.getTypeID(PN.getType()));
+    for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
+      pushValueSigned(PN.getIncomingValue(i), InstID, Vals64);
+      Vals64.push_back(VE.getValueID(PN.getIncomingBlock(i)));
+    }
+    // Emit a Vals64 vector and exit.
+    Stream.EmitRecord(Code, Vals64, AbbrevToUse);
+    Vals64.clear();
+    return;
+  }
+
+  case Instruction::LandingPad: {
+    const LandingPadInst &LP = cast<LandingPadInst>(I);
+    Code = bitc::FUNC_CODE_INST_LANDINGPAD;
+    Vals.push_back(VE.getTypeID(LP.getType()));
+    Vals.push_back(LP.isCleanup());
+    Vals.push_back(LP.getNumClauses());
+    for (unsigned I = 0, E = LP.getNumClauses(); I != E; ++I) {
+      if (LP.isCatch(I))
+        Vals.push_back(LandingPadInst::Catch);
+      else
+        Vals.push_back(LandingPadInst::Filter);
+      pushValueAndType(LP.getClause(I), InstID, Vals);
+    }
+    break;
+  }
+
+  case Instruction::Alloca: {
+    Code = bitc::FUNC_CODE_INST_ALLOCA;
+    const AllocaInst &AI = cast<AllocaInst>(I);
+    Vals.push_back(VE.getTypeID(AI.getAllocatedType()));
+    Vals.push_back(VE.getTypeID(I.getOperand(0)->getType()));
+    Vals.push_back(VE.getValueID(I.getOperand(0))); // size.
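+    // Pack the final operand: bits 0-4 hold Log2(alignment) + 1, bit 5 the
+    // inalloca flag, bit 6 marks the explicitly encoded allocated type, and
+    // bit 7 the swifterror flag.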
+    unsigned AlignRecord = Log2_32(AI.getAlignment()) + 1;
+    assert(Log2_32(Value::MaximumAlignment) + 1 < 1 << 5 &&
+           "not enough bits for maximum alignment");
+    assert(AlignRecord < 1 << 5 && "alignment greater than 1 << 64");
+    AlignRecord |= AI.isUsedWithInAlloca() << 5;
+    AlignRecord |= 1 << 6;
+    AlignRecord |= AI.isSwiftError() << 7;
+    Vals.push_back(AlignRecord);
+    break;
+  }
+
+  case Instruction::Load:
+    if (cast<LoadInst>(I).isAtomic()) {
+      Code = bitc::FUNC_CODE_INST_LOADATOMIC;
+      pushValueAndType(I.getOperand(0), InstID, Vals);
+    } else {
+      Code = bitc::FUNC_CODE_INST_LOAD;
+      if (!pushValueAndType(I.getOperand(0), InstID, Vals)) // ptr
+        AbbrevToUse = FUNCTION_INST_LOAD_ABBREV;
+    }
+    Vals.push_back(VE.getTypeID(I.getType()));
+    Vals.push_back(Log2_32(cast<LoadInst>(I).getAlignment())+1);
+    Vals.push_back(cast<LoadInst>(I).isVolatile());
+    if (cast<LoadInst>(I).isAtomic()) {
+      Vals.push_back(getEncodedOrdering(cast<LoadInst>(I).getOrdering()));
+      Vals.push_back(getEncodedSynchScope(cast<LoadInst>(I).getSynchScope()));
+    }
+    break;
+  case Instruction::Store:
+    if (cast<StoreInst>(I).isAtomic())
+      Code = bitc::FUNC_CODE_INST_STOREATOMIC;
+    else
+      Code = bitc::FUNC_CODE_INST_STORE;
+    pushValueAndType(I.getOperand(1), InstID, Vals); // ptrty + ptr
+    pushValueAndType(I.getOperand(0), InstID, Vals); // valty + val
+    Vals.push_back(Log2_32(cast<StoreInst>(I).getAlignment())+1);
+    Vals.push_back(cast<StoreInst>(I).isVolatile());
+    if (cast<StoreInst>(I).isAtomic()) {
+      Vals.push_back(getEncodedOrdering(cast<StoreInst>(I).getOrdering()));
+      Vals.push_back(getEncodedSynchScope(cast<StoreInst>(I).getSynchScope()));
+    }
+    break;
+  case Instruction::AtomicCmpXchg:
+    Code = bitc::FUNC_CODE_INST_CMPXCHG;
+    pushValueAndType(I.getOperand(0), InstID, Vals); // ptrty + ptr
+    pushValueAndType(I.getOperand(1), InstID, Vals); // cmp.
+    pushValue(I.getOperand(2), InstID, Vals);        // newval.
+    Vals.push_back(cast<AtomicCmpXchgInst>(I).isVolatile());
+    Vals.push_back(
+        getEncodedOrdering(cast<AtomicCmpXchgInst>(I).getSuccessOrdering()));
+    Vals.push_back(
+        getEncodedSynchScope(cast<AtomicCmpXchgInst>(I).getSynchScope()));
+    Vals.push_back(
+        getEncodedOrdering(cast<AtomicCmpXchgInst>(I).getFailureOrdering()));
+    Vals.push_back(cast<AtomicCmpXchgInst>(I).isWeak());
+    break;
+  case Instruction::AtomicRMW:
+    Code = bitc::FUNC_CODE_INST_ATOMICRMW;
+    pushValueAndType(I.getOperand(0), InstID, Vals); // ptrty + ptr
+    pushValue(I.getOperand(1), InstID, Vals);        // val.
+    Vals.push_back(
+        getEncodedRMWOperation(cast<AtomicRMWInst>(I).getOperation()));
+    Vals.push_back(cast<AtomicRMWInst>(I).isVolatile());
+    Vals.push_back(getEncodedOrdering(cast<AtomicRMWInst>(I).getOrdering()));
+    Vals.push_back(
+        getEncodedSynchScope(cast<AtomicRMWInst>(I).getSynchScope()));
+    break;
+  case Instruction::Fence:
+    Code = bitc::FUNC_CODE_INST_FENCE;
+    Vals.push_back(getEncodedOrdering(cast<FenceInst>(I).getOrdering()));
+    Vals.push_back(getEncodedSynchScope(cast<FenceInst>(I).getSynchScope()));
+    break;
+  case Instruction::Call: {
+    const CallInst &CI = cast<CallInst>(I);
+    FunctionType *FTy = CI.getFunctionType();
+
+    if (CI.hasOperandBundles())
+      writeOperandBundles(&CI, InstID);
+
+    Code = bitc::FUNC_CODE_INST_CALL;
+
+    Vals.push_back(VE.getAttributeID(CI.getAttributes()));
+
+    unsigned Flags = getOptimizationFlags(&I);
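+    // Pack the calling convention together with the tail/musttail/notail,
+    // explicit-type and fast-math-flag markers into a single operand.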
+    Vals.push_back(CI.getCallingConv() << bitc::CALL_CCONV |
+                   unsigned(CI.isTailCall()) << bitc::CALL_TAIL |
+                   unsigned(CI.isMustTailCall()) << bitc::CALL_MUSTTAIL |
+                   1 << bitc::CALL_EXPLICIT_TYPE |
+                   unsigned(CI.isNoTailCall()) << bitc::CALL_NOTAIL |
+                   unsigned(Flags != 0) << bitc::CALL_FMF);
+    if (Flags != 0)
+      Vals.push_back(Flags);
+
+    Vals.push_back(VE.getTypeID(FTy));
+    pushValueAndType(CI.getCalledValue(), InstID, Vals); // Callee
+
+    // Emit value #'s for the fixed parameters.
+    for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
+      // Check for labels (can happen with asm labels).
+      if (FTy->getParamType(i)->isLabelTy())
+        Vals.push_back(VE.getValueID(CI.getArgOperand(i)));
+      else
+        pushValue(CI.getArgOperand(i), InstID, Vals); // fixed param.
+    }
+
+    // Emit type/value pairs for varargs params.
+    if (FTy->isVarArg()) {
+      for (unsigned i = FTy->getNumParams(), e = CI.getNumArgOperands();
+           i != e; ++i)
+        pushValueAndType(CI.getArgOperand(i), InstID, Vals); // varargs
+    }
+    break;
+  }
+  case Instruction::VAArg:
+    Code = bitc::FUNC_CODE_INST_VAARG;
+    Vals.push_back(VE.getTypeID(I.getOperand(0)->getType()));   // valistty
+    pushValue(I.getOperand(0), InstID, Vals);                   // valist.
+    Vals.push_back(VE.getTypeID(I.getType())); // restype.
+    break;
+  }
+
+  Stream.EmitRecord(Code, Vals, AbbrevToUse);
+  Vals.clear();
+}
+
+/// Emit names for globals/functions etc. \p IsModuleLevel is true when
+/// we are writing the module-level VST, where we are including a function
+/// bitcode index and need to backpatch the VST forward declaration record.
+void ModuleBitcodeWriter::writeValueSymbolTable(
+    const ValueSymbolTable &VST, bool IsModuleLevel,
+    DenseMap<const Function *, uint64_t> *FunctionToBitcodeIndex) {
+  if (VST.empty()) {
+    // writeValueSymbolTableForwardDecl should have returned early as
+    // well. Ensure this handling remains in sync by asserting that
+    // the placeholder offset is not set.
+    assert(!IsModuleLevel || !hasVSTOffsetPlaceholder());
+    return;
+  }
+
+  if (IsModuleLevel && hasVSTOffsetPlaceholder()) {
+    // Get the offset of the VST we are writing, and backpatch it into
+    // the VST forward declaration record.
+    uint64_t VSTOffset = Stream.GetCurrentBitNo();
+    // The BitcodeStartBit was the stream offset of the identification block.
+    VSTOffset -= bitcodeStartBit();
+    assert((VSTOffset & 31) == 0 && "VST block not 32-bit aligned");
+    // Note that we add 1 here because the offset is relative to one word
+    // before the start of the identification block, which was historically
+    // always the start of the regular bitcode header.
+    Stream.BackpatchWord(VSTOffsetPlaceholder, VSTOffset / 32 + 1);
+  }
+
+  Stream.EnterSubblock(bitc::VALUE_SYMTAB_BLOCK_ID, 4);
+
+  // For the module-level VST, add abbrev Ids for the VST_CODE_FNENTRY
+  // records, which are not used in the per-function VSTs.
+  unsigned FnEntry8BitAbbrev;
+  unsigned FnEntry7BitAbbrev;
+  unsigned FnEntry6BitAbbrev;
+  unsigned GUIDEntryAbbrev;
+  if (IsModuleLevel && hasVSTOffsetPlaceholder()) {
+    // 8-bit fixed-width VST_CODE_FNENTRY function strings.
+    auto Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::VST_CODE_FNENTRY));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // value id
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // funcoffset
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8));
+    FnEntry8BitAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+
+    // 7-bit fixed width VST_CODE_FNENTRY function strings.
+    Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::VST_CODE_FNENTRY));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // value id
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // funcoffset
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 7));
+    FnEntry7BitAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+
+    // 6-bit char6 VST_CODE_FNENTRY function strings.
+    Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::VST_CODE_FNENTRY));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // value id
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // funcoffset
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Char6));
+    FnEntry6BitAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+
+    // FIXME: Change the name of this record as it is now used by
+    // the per-module index as well.
+    Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::VST_CODE_COMBINED_ENTRY));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // valueid
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // refguid
+    GUIDEntryAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+  }
+
+  // FIXME: Set up the abbrev, we know how many values there are!
+  // FIXME: We know if the type names can use 7-bit ascii.
+  SmallVector<uint64_t, 64> NameVals;
+
+  for (const ValueName &Name : VST) {
+    // Figure out the encoding to use for the name.
+    StringEncoding Bits =
+        getStringEncoding(Name.getKeyData(), Name.getKeyLength());
+
+    unsigned AbbrevToUse = VST_ENTRY_8_ABBREV;
+    NameVals.push_back(VE.getValueID(Name.getValue()));
+
+    Function *F = dyn_cast<Function>(Name.getValue());
+    if (!F) {
+      // If value is an alias, need to get the aliased base object to
+      // see if it is a function.
+      auto *GA = dyn_cast<GlobalAlias>(Name.getValue());
+      if (GA && GA->getBaseObject())
+        F = dyn_cast<Function>(GA->getBaseObject());
+    }
+
+    // VST_CODE_ENTRY:   [valueid, namechar x N]
+    // VST_CODE_FNENTRY: [valueid, funcoffset, namechar x N]
+    // VST_CODE_BBENTRY: [bbid, namechar x N]
+    unsigned Code;
+    if (isa<BasicBlock>(Name.getValue())) {
+      Code = bitc::VST_CODE_BBENTRY;
+      if (Bits == SE_Char6)
+        AbbrevToUse = VST_BBENTRY_6_ABBREV;
+    } else if (F && !F->isDeclaration()) {
+      // Must be the module-level VST, where we pass in the Index and
+      // have a VSTOffsetPlaceholder. The function-level VST should not
+      // contain any Function symbols.
+      assert(FunctionToBitcodeIndex);
+      assert(hasVSTOffsetPlaceholder());
+
+      // Save the word offset of the function (from the start of the
+      // actual bitcode written to the stream).
+      uint64_t BitcodeIndex = (*FunctionToBitcodeIndex)[F] - bitcodeStartBit();
+      assert((BitcodeIndex & 31) == 0 && "function block not 32-bit aligned");
+      // Note that we add 1 here because the offset is relative to one word
+      // before the start of the identification block, which was historically
+      // always the start of the regular bitcode header.
+      NameVals.push_back(BitcodeIndex / 32 + 1);
+
+      Code = bitc::VST_CODE_FNENTRY;
+      AbbrevToUse = FnEntry8BitAbbrev;
+      if (Bits == SE_Char6)
+        AbbrevToUse = FnEntry6BitAbbrev;
+      else if (Bits == SE_Fixed7)
+        AbbrevToUse = FnEntry7BitAbbrev;
+    } else {
+      Code = bitc::VST_CODE_ENTRY;
+      if (Bits == SE_Char6)
+        AbbrevToUse = VST_ENTRY_6_ABBREV;
+      else if (Bits == SE_Fixed7)
+        AbbrevToUse = VST_ENTRY_7_ABBREV;
+    }
+
+    for (const auto P : Name.getKey())
+      NameVals.push_back((unsigned char)P);
+
+    // Emit the finished record.
+    Stream.EmitRecord(Code, NameVals, AbbrevToUse);
+    NameVals.clear();
+  }
+  // Emit any GUID valueIDs created for indirect call edges into the
+  // module-level VST.
+  if (IsModuleLevel && hasVSTOffsetPlaceholder())
+    for (const auto &GI : valueIds()) {
+      NameVals.push_back(GI.second);
+      NameVals.push_back(GI.first);
+      Stream.EmitRecord(bitc::VST_CODE_COMBINED_ENTRY, NameVals,
+                        GUIDEntryAbbrev);
+      NameVals.clear();
+    }
+  Stream.ExitBlock();
+}
+
+/// Emit function names and summary offsets for the combined index
+/// used by ThinLTO.
+void IndexBitcodeWriter::writeCombinedValueSymbolTable() {
+  assert(hasVSTOffsetPlaceholder() && "Expected non-zero VSTOffsetPlaceholder");
+  // Get the offset of the VST we are writing, and backpatch it into
+  // the VST forward declaration record.
+  uint64_t VSTOffset = Stream.GetCurrentBitNo();
+  assert((VSTOffset & 31) == 0 && "VST block not 32-bit aligned");
+  Stream.BackpatchWord(VSTOffsetPlaceholder, VSTOffset / 32);
+
+  Stream.EnterSubblock(bitc::VALUE_SYMTAB_BLOCK_ID, 4);
+
+  auto Abbv = std::make_shared<BitCodeAbbrev>();
+  Abbv->Add(BitCodeAbbrevOp(bitc::VST_CODE_COMBINED_ENTRY));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // valueid
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // refguid
+  unsigned EntryAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+
+  SmallVector<uint64_t, 64> NameVals;
+  for (const auto &GVI : valueIds()) {
+    // VST_CODE_COMBINED_ENTRY: [valueid, refguid]
+    NameVals.push_back(GVI.second);
+    NameVals.push_back(GVI.first);
+
+    // Emit the finished record.
+    Stream.EmitRecord(bitc::VST_CODE_COMBINED_ENTRY, NameVals, EntryAbbrev);
+    NameVals.clear();
+  }
+  Stream.ExitBlock();
+}
+
+void ModuleBitcodeWriter::writeUseList(UseListOrder &&Order) {
+  assert(Order.Shuffle.size() >= 2 && "Shuffle too small");
+  unsigned Code;
+  if (isa<BasicBlock>(Order.V))
+    Code = bitc::USELIST_CODE_BB;
+  else
+    Code = bitc::USELIST_CODE_DEFAULT;
+
+  SmallVector<uint64_t, 64> Record(Order.Shuffle.begin(), Order.Shuffle.end());
+  Record.push_back(VE.getValueID(Order.V));
+  Stream.EmitRecord(Code, Record);
+}
+
+void ModuleBitcodeWriter::writeUseListBlock(const Function *F) {
+  assert(VE.shouldPreserveUseListOrder() &&
+         "Expected to be preserving use-list order");
+
+  auto hasMore = [&]() {
+    return !VE.UseListOrders.empty() && VE.UseListOrders.back().F == F;
+  };
+  if (!hasMore())
+    // Nothing to do.
+    return;
+
+  Stream.EnterSubblock(bitc::USELIST_BLOCK_ID, 3);
+  while (hasMore()) {
+    writeUseList(std::move(VE.UseListOrders.back()));
+    VE.UseListOrders.pop_back();
+  }
+  Stream.ExitBlock();
+}
+
+/// Emit a function body to the module stream.
+void ModuleBitcodeWriter::writeFunction(
+    const Function &F,
+    DenseMap<const Function *, uint64_t> &FunctionToBitcodeIndex) {
+  // Save the bitcode index of the start of this function block for recording
+  // in the VST.
+  FunctionToBitcodeIndex[&F] = Stream.GetCurrentBitNo();
+
+  Stream.EnterSubblock(bitc::FUNCTION_BLOCK_ID, 4);
+  VE.incorporateFunction(F);
+
+  SmallVector<unsigned, 64> Vals;
+
+  // Emit the number of basic blocks, so the reader can create them ahead of
+  // time.
+  Vals.push_back(VE.getBasicBlocks().size());
+  Stream.EmitRecord(bitc::FUNC_CODE_DECLAREBLOCKS, Vals);
+  Vals.clear();
+
+  // If there are function-local constants, emit them now.
+  unsigned CstStart, CstEnd;
+  VE.getFunctionConstantRange(CstStart, CstEnd);
+  writeConstants(CstStart, CstEnd, false);
+
+  // If there is function-local metadata, emit it now.
+  writeFunctionMetadata(F);
+
+  // Keep a running idea of what the instruction ID is.
+  unsigned InstID = CstEnd;
+
+  bool NeedsMetadataAttachment = F.hasMetadata();
+
+  DILocation *LastDL = nullptr;
+  // Finally, emit all the instructions, in order.
+  for (Function::const_iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
+    for (BasicBlock::const_iterator I = BB->begin(), E = BB->end();
+         I != E; ++I) {
+      writeInstruction(*I, InstID, Vals);
+
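+      // Only instructions that produce a value occupy an ID slot.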
+      if (!I->getType()->isVoidTy())
+        ++InstID;
+
+      // If the instruction has metadata, write a metadata attachment later.
+      NeedsMetadataAttachment |= I->hasMetadataOtherThanDebugLoc();
+
+      // If the instruction has a debug location, emit it.
+      DILocation *DL = I->getDebugLoc();
+      if (!DL)
+        continue;
+
+      if (DL == LastDL) {
+        // Just repeat the same debug loc as last time.
+        Stream.EmitRecord(bitc::FUNC_CODE_DEBUG_LOC_AGAIN, Vals);
+        continue;
+      }
+
+      Vals.push_back(DL->getLine());
+      Vals.push_back(DL->getColumn());
+      Vals.push_back(VE.getMetadataOrNullID(DL->getScope()));
+      Vals.push_back(VE.getMetadataOrNullID(DL->getInlinedAt()));
+      Stream.EmitRecord(bitc::FUNC_CODE_DEBUG_LOC, Vals);
+      Vals.clear();
+
+      LastDL = DL;
+    }
+
+  // Emit names for all the instructions etc.
+  if (auto *Symtab = F.getValueSymbolTable())
+    writeValueSymbolTable(*Symtab);
+
+  if (NeedsMetadataAttachment)
+    writeFunctionMetadataAttachment(F);
+  if (VE.shouldPreserveUseListOrder())
+    writeUseListBlock(&F);
+  VE.purgeFunction();
+  Stream.ExitBlock();
+}
+
+// Emit blockinfo, which defines the standard abbreviations etc.
+void ModuleBitcodeWriter::writeBlockInfo() {
+  // We only want to emit block info records for blocks that have multiple
+  // instances: CONSTANTS_BLOCK, FUNCTION_BLOCK and VALUE_SYMTAB_BLOCK.
+  // Other blocks can define their abbrevs inline.
+  Stream.EnterBlockInfoBlock();
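+  // The abbreviations below must be registered in the exact order of the
+  // corresponding *_ABBREV constants; each EmitBlockInfoAbbrev result is
+  // checked against the expected value.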
+
+  { // 8-bit fixed-width VST_CODE_ENTRY/VST_CODE_BBENTRY strings.
+    auto Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8));
+    if (Stream.EmitBlockInfoAbbrev(bitc::VALUE_SYMTAB_BLOCK_ID, Abbv) !=
+        VST_ENTRY_8_ABBREV)
+      llvm_unreachable("Unexpected abbrev ordering!");
+  }
+
+  { // 7-bit fixed width VST_CODE_ENTRY strings.
+    auto Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::VST_CODE_ENTRY));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 7));
+    if (Stream.EmitBlockInfoAbbrev(bitc::VALUE_SYMTAB_BLOCK_ID, Abbv) !=
+        VST_ENTRY_7_ABBREV)
+      llvm_unreachable("Unexpected abbrev ordering!");
+  }
+  { // 6-bit char6 VST_CODE_ENTRY strings.
+    auto Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::VST_CODE_ENTRY));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Char6));
+    if (Stream.EmitBlockInfoAbbrev(bitc::VALUE_SYMTAB_BLOCK_ID, Abbv) !=
+        VST_ENTRY_6_ABBREV)
+      llvm_unreachable("Unexpected abbrev ordering!");
+  }
+  { // 6-bit char6 VST_CODE_BBENTRY strings.
+    auto Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::VST_CODE_BBENTRY));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Char6));
+    if (Stream.EmitBlockInfoAbbrev(bitc::VALUE_SYMTAB_BLOCK_ID, Abbv) !=
+        VST_BBENTRY_6_ABBREV)
+      llvm_unreachable("Unexpected abbrev ordering!");
+  }
+
+  { // SETTYPE abbrev for CONSTANTS_BLOCK.
+    auto Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_SETTYPE));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
+                              VE.computeBitsRequiredForTypeIndicies()));
+    if (Stream.EmitBlockInfoAbbrev(bitc::CONSTANTS_BLOCK_ID, Abbv) !=
+        CONSTANTS_SETTYPE_ABBREV)
+      llvm_unreachable("Unexpected abbrev ordering!");
+  }
+
+  { // INTEGER abbrev for CONSTANTS_BLOCK.
+    auto Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_INTEGER));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
+    if (Stream.EmitBlockInfoAbbrev(bitc::CONSTANTS_BLOCK_ID, Abbv) !=
+        CONSTANTS_INTEGER_ABBREV)
+      llvm_unreachable("Unexpected abbrev ordering!");
+  }
+
+  { // CE_CAST abbrev for CONSTANTS_BLOCK.
+    auto Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_CE_CAST));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 4));  // cast opc
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,       // typeid
+                              VE.computeBitsRequiredForTypeIndicies()));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));    // value id
+
+    if (Stream.EmitBlockInfoAbbrev(bitc::CONSTANTS_BLOCK_ID, Abbv) !=
+        CONSTANTS_CE_CAST_Abbrev)
+      llvm_unreachable("Unexpected abbrev ordering!");
+  }
+  { // NULL abbrev for CONSTANTS_BLOCK.
+    auto Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_NULL));
+    if (Stream.EmitBlockInfoAbbrev(bitc::CONSTANTS_BLOCK_ID, Abbv) !=
+        CONSTANTS_NULL_Abbrev)
+      llvm_unreachable("Unexpected abbrev ordering!");
+  }
+
+  // FIXME: This should only use space for first class types!
+
+  { // INST_LOAD abbrev for FUNCTION_BLOCK.
+    auto Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_LOAD));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Ptr
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,    // dest ty
+                              VE.computeBitsRequiredForTypeIndicies()));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 4)); // Align
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // volatile
+    if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, Abbv) !=
+        FUNCTION_INST_LOAD_ABBREV)
+      llvm_unreachable("Unexpected abbrev ordering!");
+  }
+  { // INST_BINOP abbrev for FUNCTION_BLOCK.
+    auto Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_BINOP));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LHS
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // RHS
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 4)); // opc
+    if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, Abbv) !=
+        FUNCTION_INST_BINOP_ABBREV)
+      llvm_unreachable("Unexpected abbrev ordering!");
+  }
+  { // INST_BINOP_FLAGS abbrev for FUNCTION_BLOCK.
+    auto Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_BINOP));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LHS
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // RHS
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 4)); // opc
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 7)); // flags
+    if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, Abbv) !=
+        FUNCTION_INST_BINOP_FLAGS_ABBREV)
+      llvm_unreachable("Unexpected abbrev ordering!");
+  }
+  { // INST_CAST abbrev for FUNCTION_BLOCK.
+    auto Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_CAST));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));    // OpVal
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,       // dest ty
+                              VE.computeBitsRequiredForTypeIndicies()));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 4));  // opc
+    if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, Abbv) !=
+        FUNCTION_INST_CAST_ABBREV)
+      llvm_unreachable("Unexpected abbrev ordering!");
+  }
+
+  { // INST_RET abbrev for FUNCTION_BLOCK.
+    auto Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_RET));
+    if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, Abbv) !=
+        FUNCTION_INST_RET_VOID_ABBREV)
+      llvm_unreachable("Unexpected abbrev ordering!");
+  }
+  { // INST_RET abbrev for FUNCTION_BLOCK.
+    auto Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_RET));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // ValID
+    if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, Abbv) !=
+        FUNCTION_INST_RET_VAL_ABBREV)
+      llvm_unreachable("Unexpected abbrev ordering!");
+  }
+  { // INST_UNREACHABLE abbrev for FUNCTION_BLOCK.
+    auto Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_UNREACHABLE));
+    if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, Abbv) !=
+        FUNCTION_INST_UNREACHABLE_ABBREV)
+      llvm_unreachable("Unexpected abbrev ordering!");
+  }
+  {
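+    // INST_GEP abbrev for FUNCTION_BLOCK.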
+    auto Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_GEP));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, // dest ty
+                              Log2_32_Ceil(VE.getTypes().size() + 1)));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
+    if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, Abbv) !=
+        FUNCTION_INST_GEP_ABBREV)
+      llvm_unreachable("Unexpected abbrev ordering!");
+  }
+
+  Stream.ExitBlock();
+}
+
+/// Write the module path strings, currently only used when generating
+/// a combined index file.
+void IndexBitcodeWriter::writeModStrings() {
+  Stream.EnterSubblock(bitc::MODULE_STRTAB_BLOCK_ID, 3);
+
+  // TODO: See which abbrev sizes we actually need to emit
+
+  // 8-bit fixed-width MST_ENTRY strings.
+  auto Abbv = std::make_shared<BitCodeAbbrev>();
+  Abbv->Add(BitCodeAbbrevOp(bitc::MST_CODE_ENTRY));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8));
+  unsigned Abbrev8Bit = Stream.EmitAbbrev(std::move(Abbv));
+
+  // 7-bit fixed width MST_ENTRY strings.
+  Abbv = std::make_shared<BitCodeAbbrev>();
+  Abbv->Add(BitCodeAbbrevOp(bitc::MST_CODE_ENTRY));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 7));
+  unsigned Abbrev7Bit = Stream.EmitAbbrev(std::move(Abbv));
+
+  // 6-bit char6 MST_ENTRY strings.
+  Abbv = std::make_shared<BitCodeAbbrev>();
+  Abbv->Add(BitCodeAbbrevOp(bitc::MST_CODE_ENTRY));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Char6));
+  unsigned Abbrev6Bit = Stream.EmitAbbrev(std::move(Abbv));
+
+  // Module Hash, 160 bits SHA1. Optionally, emitted after each MST_CODE_ENTRY.
+  Abbv = std::make_shared<BitCodeAbbrev>();
+  Abbv->Add(BitCodeAbbrevOp(bitc::MST_CODE_HASH));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
+  unsigned AbbrevHash = Stream.EmitAbbrev(std::move(Abbv));
+
+  SmallVector<unsigned, 64> Vals;
+  for (const auto &MPSE : Index.modulePaths()) {
+    if (!doIncludeModule(MPSE.getKey()))
+      continue;
+    StringEncoding Bits =
+        getStringEncoding(MPSE.getKey().data(), MPSE.getKey().size());
+    unsigned AbbrevToUse = Abbrev8Bit;
+    if (Bits == SE_Char6)
+      AbbrevToUse = Abbrev6Bit;
+    else if (Bits == SE_Fixed7)
+      AbbrevToUse = Abbrev7Bit;
+
+    Vals.push_back(MPSE.getValue().first);
+
+    for (const auto P : MPSE.getKey())
+      Vals.push_back((unsigned char)P);
+
+    // Emit the finished record.
+    Stream.EmitRecord(bitc::MST_CODE_ENTRY, Vals, AbbrevToUse);
+
+    Vals.clear();
+    // Emit an optional hash for the module now
+    auto &Hash = MPSE.getValue().second;
+    bool AllZero = true; // Detect if the hash is empty, and do not generate it
+    for (auto Val : Hash) {
+      if (Val)
+        AllZero = false;
+      Vals.push_back(Val);
+    }
+    if (!AllZero) {
+      // Emit the hash record.
+      Stream.EmitRecord(bitc::MST_CODE_HASH, Vals, AbbrevHash);
+    }
+
+    Vals.clear();
+  }
+  Stream.ExitBlock();
+}
+
+// Helper to emit a single function summary record.
+void ModuleBitcodeWriter::writePerModuleFunctionSummaryRecord(
+    SmallVector<uint64_t, 64> &NameVals, GlobalValueSummary *Summary,
+    unsigned ValueID, unsigned FSCallsAbbrev, unsigned FSCallsProfileAbbrev,
+    const Function &F) {
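+  // FS_PERMODULE[_PROFILE] record layout (matching the abbrevs defined in
+  // writePerModuleGlobalValueSummary):
+  //   [valueid, flags, instcount, numrefs, numrefs x valueid,
+  //    n x (valueid [, hotness])]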
+  NameVals.push_back(ValueID);
+
+  FunctionSummary *FS = cast<FunctionSummary>(Summary);
+  if (!FS->type_tests().empty())
+    Stream.EmitRecord(bitc::FS_TYPE_TESTS, FS->type_tests());
+
+  NameVals.push_back(getEncodedGVSummaryFlags(FS->flags()));
+  NameVals.push_back(FS->instCount());
+  NameVals.push_back(FS->refs().size());
+
+  for (auto &RI : FS->refs())
+    NameVals.push_back(VE.getValueID(RI.getValue()));
+
+  bool HasProfileData = F.getEntryCount().hasValue();
+  for (auto &ECI : FS->calls()) {
+    NameVals.push_back(getValueId(ECI.first));
+    if (HasProfileData)
+      NameVals.push_back(static_cast<uint8_t>(ECI.second.Hotness));
+  }
+
+  unsigned FSAbbrev = (HasProfileData ? FSCallsProfileAbbrev : FSCallsAbbrev);
+  unsigned Code =
+      (HasProfileData ? bitc::FS_PERMODULE_PROFILE : bitc::FS_PERMODULE);
+
+  // Emit the finished record.
+  Stream.EmitRecord(Code, NameVals, FSAbbrev);
+  NameVals.clear();
+}
+
+// Collect the global value references in the given variable's initializer,
+// and emit them in a summary record.
+void ModuleBitcodeWriter::writeModuleLevelReferences(
+    const GlobalVariable &V, SmallVector<uint64_t, 64> &NameVals,
+    unsigned FSModRefsAbbrev) {
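+  // FS_PERMODULE_GLOBALVAR_INIT_REFS record layout: [valueid, flags, n x valueid].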
+  auto Summaries =
+      Index->findGlobalValueSummaryList(GlobalValue::getGUID(V.getName()));
+  if (Summaries == Index->end()) {
+    // Only declarations should lack a summary (a declaration might, however,
+    // have a summary if the def was in module-level asm).
+    assert(V.isDeclaration());
+    return;
+  }
+  auto *Summary = Summaries->second.front().get();
+  NameVals.push_back(VE.getValueID(&V));
+  GlobalVarSummary *VS = cast<GlobalVarSummary>(Summary);
+  NameVals.push_back(getEncodedGVSummaryFlags(VS->flags()));
+
+  unsigned SizeBeforeRefs = NameVals.size();
+  for (auto &RI : VS->refs())
+    NameVals.push_back(VE.getValueID(RI.getValue()));
+  // Sort the refs for deterministic output; the vector returned by VS->refs()
+  // has been initialized from a DenseSet.
+  std::sort(NameVals.begin() + SizeBeforeRefs, NameVals.end());
+
+  Stream.EmitRecord(bitc::FS_PERMODULE_GLOBALVAR_INIT_REFS, NameVals,
+                    FSModRefsAbbrev);
+  NameVals.clear();
+}
+
+// Current version for the summary.
+// This is bumped whenever we introduce changes in the way some records are
+// interpreted, such as flags.
+static const uint64_t INDEX_VERSION = 3;
+
+/// Emit the per-module summary section alongside the rest of
+/// the module's bitcode.
+void ModuleBitcodeWriter::writePerModuleGlobalValueSummary() {
+  Stream.EnterSubblock(bitc::GLOBALVAL_SUMMARY_BLOCK_ID, 4);
+
+  Stream.EmitRecord(bitc::FS_VERSION, ArrayRef<uint64_t>{INDEX_VERSION});
+
+  if (Index->begin() == Index->end()) {
+    Stream.ExitBlock();
+    return;
+  }
+
+  // Abbrev for FS_PERMODULE.
+  auto Abbv = std::make_shared<BitCodeAbbrev>();
+  Abbv->Add(BitCodeAbbrevOp(bitc::FS_PERMODULE));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));   // valueid
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));   // flags
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));   // instcount
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 4));   // numrefs
+  // numrefs x valueid, n x (valueid)
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
+  unsigned FSCallsAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+
+  // Abbrev for FS_PERMODULE_PROFILE.
+  Abbv = std::make_shared<BitCodeAbbrev>();
+  Abbv->Add(BitCodeAbbrevOp(bitc::FS_PERMODULE_PROFILE));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));   // valueid
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));   // flags
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));   // instcount
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 4));   // numrefs
+  // numrefs x valueid, n x (valueid, hotness)
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
+  unsigned FSCallsProfileAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+
+  // Abbrev for FS_PERMODULE_GLOBALVAR_INIT_REFS.
+  Abbv = std::make_shared<BitCodeAbbrev>();
+  Abbv->Add(BitCodeAbbrevOp(bitc::FS_PERMODULE_GLOBALVAR_INIT_REFS));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // valueid
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // flags
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));  // valueids
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
+  unsigned FSModRefsAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+
+  // Abbrev for FS_ALIAS.
+  Abbv = std::make_shared<BitCodeAbbrev>();
+  Abbv->Add(BitCodeAbbrevOp(bitc::FS_ALIAS));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));   // valueid
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));   // flags
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));   // valueid
+  unsigned FSAliasAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+
+  SmallVector<uint64_t, 64> NameVals;
+  // Iterate over the list of functions instead of the Index to
+  // ensure the ordering is stable.
+  for (const Function &F : M) {
+    // Summary emission does not support anonymous functions; they have to be
+    // renamed using the anonymous function renaming pass.
+    if (!F.hasName())
+      report_fatal_error("Unexpected anonymous function when writing summary");
+
+    auto Summaries =
+        Index->findGlobalValueSummaryList(GlobalValue::getGUID(F.getName()));
+    if (Summaries == Index->end()) {
+      // Only declarations should lack a summary (a declaration might,
+      // however, have a summary if the def was in module-level asm).
+      assert(F.isDeclaration());
+      continue;
+    }
+    auto *Summary = Summaries->second.front().get();
+    writePerModuleFunctionSummaryRecord(NameVals, Summary, VE.getValueID(&F),
+                                        FSCallsAbbrev, FSCallsProfileAbbrev, F);
+  }
+
+  // Capture references from GlobalVariable initializers, which are outside
+  // of a function scope.
+  for (const GlobalVariable &G : M.globals())
+    writeModuleLevelReferences(G, NameVals, FSModRefsAbbrev);
+
+  for (const GlobalAlias &A : M.aliases()) {
+    auto *Aliasee = A.getBaseObject();
+    if (!Aliasee->hasName())
+      // Nameless functions don't have an entry in the summary; skip them.
+      continue;
+    auto AliasId = VE.getValueID(&A);
+    auto AliaseeId = VE.getValueID(Aliasee);
+    NameVals.push_back(AliasId);
+    auto *Summary = Index->getGlobalValueSummary(A);
+    AliasSummary *AS = cast<AliasSummary>(Summary);
+    NameVals.push_back(getEncodedGVSummaryFlags(AS->flags()));
+    NameVals.push_back(AliaseeId);
+    Stream.EmitRecord(bitc::FS_ALIAS, NameVals, FSAliasAbbrev);
+    NameVals.clear();
+  }
+
+  Stream.ExitBlock();
+}
+
+/// Emit the combined summary section into the combined index file.
+void IndexBitcodeWriter::writeCombinedGlobalValueSummary() {
+  Stream.EnterSubblock(bitc::GLOBALVAL_SUMMARY_BLOCK_ID, 3);
+  Stream.EmitRecord(bitc::FS_VERSION, ArrayRef<uint64_t>{INDEX_VERSION});
+
+  // Abbrev for FS_COMBINED.
+  auto Abbv = std::make_shared<BitCodeAbbrev>();
+  Abbv->Add(BitCodeAbbrevOp(bitc::FS_COMBINED));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));   // valueid
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));   // modid
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));   // flags
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));   // instcount
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 4));   // numrefs
+  // numrefs x valueid, n x (valueid)
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
+  unsigned FSCallsAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+
+  // Abbrev for FS_COMBINED_PROFILE.
+  Abbv = std::make_shared<BitCodeAbbrev>();
+  Abbv->Add(BitCodeAbbrevOp(bitc::FS_COMBINED_PROFILE));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));   // valueid
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));   // modid
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));   // flags
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));   // instcount
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 4));   // numrefs
+  // numrefs x valueid, n x (valueid, hotness)
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
+  unsigned FSCallsProfileAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+
+  // Abbrev for FS_COMBINED_GLOBALVAR_INIT_REFS.
+  Abbv = std::make_shared<BitCodeAbbrev>();
+  Abbv->Add(BitCodeAbbrevOp(bitc::FS_COMBINED_GLOBALVAR_INIT_REFS));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));   // valueid
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));   // modid
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));   // flags
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));    // valueids
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
+  unsigned FSModRefsAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+
+  // Abbrev for FS_COMBINED_ALIAS.
+  Abbv = std::make_shared<BitCodeAbbrev>();
+  Abbv->Add(BitCodeAbbrevOp(bitc::FS_COMBINED_ALIAS));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));   // valueid
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));   // modid
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));   // flags
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));   // valueid
+  unsigned FSAliasAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+
+  // The aliases are emitted as a post-pass, and will point to the value
+  // id of the aliasee. Save them in a vector for post-processing.
+  SmallVector<AliasSummary *, 64> Aliases;
+
+  // Save the value id for each summary for alias emission.
+  DenseMap<const GlobalValueSummary *, unsigned> SummaryToValueIdMap;
+
+  SmallVector<uint64_t, 64> NameVals;
+
+  // For local linkage, we also emit the original name separately
+  // immediately after the record.
+  auto MaybeEmitOriginalName = [&](GlobalValueSummary &S) {
+    if (!GlobalValue::isLocalLinkage(S.linkage()))
+      return;
+    NameVals.push_back(S.getOriginalName());
+    Stream.EmitRecord(bitc::FS_COMBINED_ORIGINAL_NAME, NameVals);
+    NameVals.clear();
+  };
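+  // The FS_COMBINED_ORIGINAL_NAME record carries S.getOriginalName(), the GUID
+  // of the original (pre-promotion) source-level name of the local symbol.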
+
+  for (const auto &I : *this) {
+    GlobalValueSummary *S = I.second;
+    assert(S);
+
+    assert(hasValueId(I.first));
+    unsigned ValueId = getValueId(I.first);
+    SummaryToValueIdMap[S] = ValueId;
+
+    if (auto *AS = dyn_cast<AliasSummary>(S)) {
+      // Process aliases as a post-pass because the reader wants all
+      // globals to be loaded first.
+      Aliases.push_back(AS);
+      continue;
+    }
+
+    if (auto *VS = dyn_cast<GlobalVarSummary>(S)) {
+      NameVals.push_back(ValueId);
+      NameVals.push_back(Index.getModuleId(VS->modulePath()));
+      NameVals.push_back(getEncodedGVSummaryFlags(VS->flags()));
+      for (auto &RI : VS->refs()) {
+        NameVals.push_back(getValueId(RI.getGUID()));
+      }
+
+      // Emit the finished record.
+      Stream.EmitRecord(bitc::FS_COMBINED_GLOBALVAR_INIT_REFS, NameVals,
+                        FSModRefsAbbrev);
+      NameVals.clear();
+      MaybeEmitOriginalName(*S);
+      continue;
+    }
+
+    auto *FS = cast<FunctionSummary>(S);
+    if (!FS->type_tests().empty())
+      Stream.EmitRecord(bitc::FS_TYPE_TESTS, FS->type_tests());
+
+    NameVals.push_back(ValueId);
+    NameVals.push_back(Index.getModuleId(FS->modulePath()));
+    NameVals.push_back(getEncodedGVSummaryFlags(FS->flags()));
+    NameVals.push_back(FS->instCount());
+    NameVals.push_back(FS->refs().size());
+
+    for (auto &RI : FS->refs()) {
+      NameVals.push_back(getValueId(RI.getGUID()));
+    }
+
+    bool HasProfileData = false;
+    for (auto &EI : FS->calls()) {
+      HasProfileData |= EI.second.Hotness != CalleeInfo::HotnessType::Unknown;
+      if (HasProfileData)
+        break;
+    }
+
+    for (auto &EI : FS->calls()) {
+      // If this GUID doesn't have a value id, it doesn't have a function
+      // summary and we don't need to record any calls to it.
+      if (!hasValueId(EI.first.getGUID()))
+        continue;
+      NameVals.push_back(getValueId(EI.first.getGUID()));
+      if (HasProfileData)
+        NameVals.push_back(static_cast<uint8_t>(EI.second.Hotness));
+    }
+
+    unsigned FSAbbrev = (HasProfileData ? FSCallsProfileAbbrev : FSCallsAbbrev);
+    unsigned Code =
+        (HasProfileData ? bitc::FS_COMBINED_PROFILE : bitc::FS_COMBINED);
+
+    // Emit the finished record.
+    Stream.EmitRecord(Code, NameVals, FSAbbrev);
+    NameVals.clear();
+    MaybeEmitOriginalName(*S);
+  }
+
+  for (auto *AS : Aliases) {
+    auto AliasValueId = SummaryToValueIdMap[AS];
+    assert(AliasValueId);
+    NameVals.push_back(AliasValueId);
+    NameVals.push_back(Index.getModuleId(AS->modulePath()));
+    NameVals.push_back(getEncodedGVSummaryFlags(AS->flags()));
+    auto AliaseeValueId = SummaryToValueIdMap[&AS->getAliasee()];
+    assert(AliaseeValueId);
+    NameVals.push_back(AliaseeValueId);
+
+    // Emit the finished record.
+    Stream.EmitRecord(bitc::FS_COMBINED_ALIAS, NameVals, FSAliasAbbrev);
+    NameVals.clear();
+    MaybeEmitOriginalName(*AS);
+  }
+
+  Stream.ExitBlock();
+}
+
+/// Create the "IDENTIFICATION_BLOCK_ID" containing a single string with the
+/// current llvm version, and a record for the epoch number.
+void writeIdentificationBlock(BitstreamWriter &Stream) {
+  Stream.EnterSubblock(bitc::IDENTIFICATION_BLOCK_ID, 5);
+
+  // Write the "user readable" string identifying the bitcode producer
+  auto Abbv = std::make_shared<BitCodeAbbrev>();
+  Abbv->Add(BitCodeAbbrevOp(bitc::IDENTIFICATION_CODE_STRING));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Char6));
+  auto StringAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+  writeStringRecord(Stream, bitc::IDENTIFICATION_CODE_STRING,
+                    "LLVM" LLVM_VERSION_STRING, StringAbbrev);
+
+  // Write the epoch version
+  Abbv = std::make_shared<BitCodeAbbrev>();
+  Abbv->Add(BitCodeAbbrevOp(bitc::IDENTIFICATION_CODE_EPOCH));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
+  auto EpochAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+  SmallVector<unsigned, 1> Vals = {bitc::BITCODE_CURRENT_EPOCH};
+  Stream.EmitRecord(bitc::IDENTIFICATION_CODE_EPOCH, Vals, EpochAbbrev);
+  Stream.ExitBlock();
+}
+
+void ModuleBitcodeWriter::writeModuleHash(size_t BlockStartPos) {
+  // Emit the module's hash.
+  // MODULE_CODE_HASH: [5*i32]
+  SHA1 Hasher;
+  Hasher.update(ArrayRef<uint8_t>((const uint8_t *)&(Buffer)[BlockStartPos],
+                                  Buffer.size() - BlockStartPos));
+  StringRef Hash = Hasher.result();
+  uint32_t Vals[5];
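+  // Split the 20-byte SHA1 digest into five big-endian 32-bit words to match
+  // the MODULE_CODE_HASH: [5*i32] layout.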
+  for (int Pos = 0; Pos < 20; Pos += 4) {
+    Vals[Pos / 4] = support::endian::read32be(Hash.data() + Pos);
+  }
+
+  // Emit the finished record.
+  Stream.EmitRecord(bitc::MODULE_CODE_HASH, Vals);
+}
+
+void ModuleBitcodeWriter::write() {
+  writeIdentificationBlock(Stream);
+
+  Stream.EnterSubblock(bitc::MODULE_BLOCK_ID, 3);
+  size_t BlockStartPos = Buffer.size();
+
+  SmallVector<unsigned, 1> Vals;
+  unsigned CurVersion = 1;
+  Vals.push_back(CurVersion);
+  Stream.EmitRecord(bitc::MODULE_CODE_VERSION, Vals);
+
+  // Emit blockinfo, which defines the standard abbreviations etc.
+  writeBlockInfo();
+
+  // Emit information about attribute groups.
+  writeAttributeGroupTable();
+
+  // Emit information about parameter attributes.
+  writeAttributeTable();
+
+  // Emit information describing all of the types in the module.
+  writeTypeTable();
+
+  writeComdats();
+
+  // Emit top-level description of module, including target triple, inline asm,
+  // descriptors for global variables, and function prototype info.
+  writeModuleInfo();
+
+  // Emit constants.
+  writeModuleConstants();
+
+  // Emit metadata kind names.
+  writeModuleMetadataKinds();
+
+  // Emit metadata.
+  writeModuleMetadata();
+
+  // Emit module-level use-lists.
+  if (VE.shouldPreserveUseListOrder())
+    writeUseListBlock(nullptr);
+
+  writeOperandBundleTags();
+
+  // Emit function bodies.
+  DenseMap<const Function *, uint64_t> FunctionToBitcodeIndex;
+  for (Module::const_iterator F = M.begin(), E = M.end(); F != E; ++F)
+    if (!F->isDeclaration())
+      writeFunction(*F, FunctionToBitcodeIndex);
+
+  // This must be emitted after the writeFunction calls above, which populate
+  // the summary information in the index.
+  if (Index)
+    writePerModuleGlobalValueSummary();
+
+  writeValueSymbolTable(M.getValueSymbolTable(),
+                        /* IsModuleLevel */ true, &FunctionToBitcodeIndex);
+
+  if (GenerateHash) {
+    writeModuleHash(BlockStartPos);
+  }
+
+  Stream.ExitBlock();
+}
+
+static void writeInt32ToBuffer(uint32_t Value, SmallVectorImpl<char> &Buffer,
+                               uint32_t &Position) {
+  support::endian::write32le(&Buffer[Position], Value);
+  Position += 4;
+}
+
+/// If generating a bc file on darwin, we have to emit a
+/// header and trailer to make it compatible with the system archiver.  To do
+/// this we emit the following header, and then emit a trailer that pads the
+/// file out to be a multiple of 16 bytes.
+///
+/// struct bc_header {
+///   uint32_t Magic;         // 0x0B17C0DE
+///   uint32_t Version;       // Version, currently always 0.
+///   uint32_t BitcodeOffset; // Offset to traditional bitcode file.
+///   uint32_t BitcodeSize;   // Size of traditional bitcode file.
+///   uint32_t CPUType;       // CPU specifier.
+///   ... potentially more later ...
+/// };
+static void emitDarwinBCHeaderAndTrailer(SmallVectorImpl<char> &Buffer,
+                                         const Triple &TT) {
+  unsigned CPUType = ~0U;
+
+  // Match x86_64-*, i[3-9]86-*, powerpc-*, powerpc64-*, arm-*, thumb-*,
+  // armv[0-9]-*, thumbv[0-9]-*, armv5te-*, or armv6t2-*. The CPUType is a magic
+  // number from /usr/include/mach/machine.h.  It is ok to reproduce the
+  // specific constants here because they are implicitly part of the Darwin ABI.
+  enum {
+    DARWIN_CPU_ARCH_ABI64      = 0x01000000,
+    DARWIN_CPU_TYPE_X86        = 7,
+    DARWIN_CPU_TYPE_ARM        = 12,
+    DARWIN_CPU_TYPE_POWERPC    = 18
+  };
+
+  Triple::ArchType Arch = TT.getArch();
+  if (Arch == Triple::x86_64)
+    CPUType = DARWIN_CPU_TYPE_X86 | DARWIN_CPU_ARCH_ABI64;
+  else if (Arch == Triple::x86)
+    CPUType = DARWIN_CPU_TYPE_X86;
+  else if (Arch == Triple::ppc)
+    CPUType = DARWIN_CPU_TYPE_POWERPC;
+  else if (Arch == Triple::ppc64)
+    CPUType = DARWIN_CPU_TYPE_POWERPC | DARWIN_CPU_ARCH_ABI64;
+  else if (Arch == Triple::arm || Arch == Triple::thumb)
+    CPUType = DARWIN_CPU_TYPE_ARM;
+
+  // Traditional Bitcode starts after header.
+  assert(Buffer.size() >= BWH_HeaderSize &&
+         "Expected header size to be reserved");
+  unsigned BCOffset = BWH_HeaderSize;
+  unsigned BCSize = Buffer.size() - BWH_HeaderSize;
+
+  // Write the magic and version.
+  unsigned Position = 0;
+  writeInt32ToBuffer(0x0B17C0DE, Buffer, Position);
+  writeInt32ToBuffer(0, Buffer, Position); // Version.
+  writeInt32ToBuffer(BCOffset, Buffer, Position);
+  writeInt32ToBuffer(BCSize, Buffer, Position);
+  writeInt32ToBuffer(CPUType, Buffer, Position);
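+  // The five 4-byte fields above fill the BWH_HeaderSize bytes reserved at the
+  // front of the buffer; the wrapped bitcode starts immediately after them.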
+
+  // If the file is not a multiple of 16 bytes, insert dummy padding.
+  while (Buffer.size() & 15)
+    Buffer.push_back(0);
+}
+
+/// Helper to write the header common to all bitcode files.
+static void writeBitcodeHeader(BitstreamWriter &Stream) {
+  // Emit the file header.
+  Stream.Emit((unsigned)'B', 8);
+  Stream.Emit((unsigned)'C', 8);
+  Stream.Emit(0x0, 4);
+  Stream.Emit(0xC, 4);
+  Stream.Emit(0xE, 4);
+  Stream.Emit(0xD, 4);
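+  // Together these emit the standard bitcode magic number: 'B', 'C', 0xC0DE.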
+}
+
+BitcodeWriter::BitcodeWriter(SmallVectorImpl<char> &Buffer)
+    : Buffer(Buffer), Stream(new BitstreamWriter(Buffer)) {
+  writeBitcodeHeader(*Stream);
+}
+
+BitcodeWriter::~BitcodeWriter() = default;
+
+void BitcodeWriter::writeModule(const Module *M,
+                                bool ShouldPreserveUseListOrder,
+                                const ModuleSummaryIndex *Index,
+                                bool GenerateHash) {
+  ModuleBitcodeWriter ModuleWriter(
+      M, Buffer, *Stream, ShouldPreserveUseListOrder, Index, GenerateHash);
+  ModuleWriter.write();
+}
+
+/// WriteBitcodeToFile - Write the specified module to the specified output
+/// stream.
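+///
+/// Illustrative usage (a sketch only; any raw_ostream works):
+/// \code
+///   std::error_code EC;
+///   raw_fd_ostream OS("out.bc", EC, sys::fs::F_None);
+///   WriteBitcodeToFile(&M, OS); // M is some llvm::Module
+/// \endcode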
+void llvm::WriteBitcodeToFile(const Module *M, raw_ostream &Out,
+                              bool ShouldPreserveUseListOrder,
+                              const ModuleSummaryIndex *Index,
+                              bool GenerateHash) {
+  SmallVector<char, 0> Buffer;
+  Buffer.reserve(256*1024);
+
+  // If this is darwin or another generic macho target, reserve space for the
+  // header.
+  Triple TT(M->getTargetTriple());
+  if (TT.isOSDarwin() || TT.isOSBinFormatMachO())
+    Buffer.insert(Buffer.begin(), BWH_HeaderSize, 0);
+
+  BitcodeWriter Writer(Buffer);
+  Writer.writeModule(M, ShouldPreserveUseListOrder, Index, GenerateHash);
+
+  if (TT.isOSDarwin() || TT.isOSBinFormatMachO())
+    emitDarwinBCHeaderAndTrailer(Buffer, TT);
+
+  // Write the generated bitstream to "Out".
+  Out.write((char*)&Buffer.front(), Buffer.size());
+}
+
+void IndexBitcodeWriter::write() {
+  Stream.EnterSubblock(bitc::MODULE_BLOCK_ID, 3);
+
+  SmallVector<unsigned, 1> Vals;
+  unsigned CurVersion = 1;
+  Vals.push_back(CurVersion);
+  Stream.EmitRecord(bitc::MODULE_CODE_VERSION, Vals);
+
+  // If we have a VST, write the VSTOFFSET record placeholder.
+  writeValueSymbolTableForwardDecl();
+
+  // Write the module paths in the combined index.
+  writeModStrings();
+
+  // Write the summary combined index records.
+  writeCombinedGlobalValueSummary();
+
+  // Need a special VST writer for the combined index (we don't have a
+  // real VST and real values when this is invoked).
+  writeCombinedValueSymbolTable();
+
+  Stream.ExitBlock();
+}
+
+// Write the specified module summary index to the given raw output stream,
+// where it will be written in a new bitcode block. This is used when
+// writing the combined index file for ThinLTO. When writing a subset of the
+// index for a distributed backend, provide a \p ModuleToSummariesForIndex map.
+void llvm::WriteIndexToFile(
+    const ModuleSummaryIndex &Index, raw_ostream &Out,
+    const std::map<std::string, GVSummaryMapTy> *ModuleToSummariesForIndex) {
+  SmallVector<char, 0> Buffer;
+  Buffer.reserve(256 * 1024);
+
+  BitstreamWriter Stream(Buffer);
+  writeBitcodeHeader(Stream);
+
+  IndexBitcodeWriter IndexWriter(Stream, Index, ModuleToSummariesForIndex);
+  IndexWriter.write();
+
+  Out.write((char *)&Buffer.front(), Buffer.size());
+}
diff --git a/llvm/tools/hpvm/llvm_patches/lib/Bitcode/Writer/BitcodeWriter.cpp.patch b/llvm/tools/hpvm/llvm_patches/lib/Bitcode/Writer/BitcodeWriter.cpp.patch
new file mode 100644
index 0000000000..18a91b2c7c
--- /dev/null
+++ b/llvm/tools/hpvm/llvm_patches/lib/Bitcode/Writer/BitcodeWriter.cpp.patch
@@ -0,0 +1,18 @@
+--- ../../../lib/Bitcode/Writer/BitcodeWriter.cpp	2019-12-29 18:23:35.504922082 -0600
++++ lib/Bitcode/Writer/BitcodeWriter.cpp	2019-12-29 18:47:13.430999323 -0600
+@@ -699,6 +699,15 @@
+     return bitc::ATTR_KIND_WRITEONLY;
+   case Attribute::ZExt:
+     return bitc::ATTR_KIND_Z_EXT;
++
++  // VISC Attributes
++  case Attribute::In:
++    return bitc::ATTR_KIND_IN;
++  case Attribute::Out:
++    return bitc::ATTR_KIND_OUT;
++  case Attribute::InOut:
++    return bitc::ATTR_KIND_INOUT;
++
+   case Attribute::EndAttrKinds:
+     llvm_unreachable("Can not encode end-attribute kinds marker.");
+   case Attribute::None:
diff --git a/llvm/tools/hpvm/llvm_patches/lib/IR/Attributes.cpp b/llvm/tools/hpvm/llvm_patches/lib/IR/Attributes.cpp
new file mode 100644
index 0000000000..de6872d00f
--- /dev/null
+++ b/llvm/tools/hpvm/llvm_patches/lib/IR/Attributes.cpp
@@ -0,0 +1,1525 @@
+//===-- Attributes.cpp - Implement AttributesList -------------------------===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// \file
+// \brief This file implements the Attribute, AttributeImpl, AttrBuilder,
+// AttributeSetImpl, and AttributeSet classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/Function.h"
+#include "AttributeImpl.h"
+#include "LLVMContextImpl.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/IR/Type.h"
+#include "llvm/Support/Atomic.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// Attribute Construction Methods
+//===----------------------------------------------------------------------===//
+
+// allocsize has two integer arguments, but because they're both 32 bits, we can
+// pack them into one 64-bit value, at the cost of making said value
+// nonsensical.
+//
+// In order to do this, we need to reserve one value of the second (optional)
+// allocsize argument to signify "not present."
+static const unsigned AllocSizeNumElemsNotPresent = -1;
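+// For example (illustrative values): packAllocSizeArgs(4, 16) yields
+// 0x0000000400000010, while packAllocSizeArgs(4, None) yields
+// 0x00000004FFFFFFFF, where the low 32 bits hold the reserved "not present"
+// value.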
+
+static uint64_t packAllocSizeArgs(unsigned ElemSizeArg,
+                                  const Optional<unsigned> &NumElemsArg) {
+  assert((!NumElemsArg.hasValue() ||
+          *NumElemsArg != AllocSizeNumElemsNotPresent) &&
+         "Attempting to pack a reserved value");
+
+  return uint64_t(ElemSizeArg) << 32 |
+         NumElemsArg.getValueOr(AllocSizeNumElemsNotPresent);
+}
+
+static std::pair<unsigned, Optional<unsigned>>
+unpackAllocSizeArgs(uint64_t Num) {
+  unsigned NumElems = Num & std::numeric_limits<unsigned>::max();
+  unsigned ElemSizeArg = Num >> 32;
+
+  Optional<unsigned> NumElemsArg;
+  if (NumElems != AllocSizeNumElemsNotPresent)
+    NumElemsArg = NumElems;
+  return std::make_pair(ElemSizeArg, NumElemsArg);
+}
+
+Attribute Attribute::get(LLVMContext &Context, Attribute::AttrKind Kind,
+                         uint64_t Val) {
+  LLVMContextImpl *pImpl = Context.pImpl;
+  FoldingSetNodeID ID;
+  ID.AddInteger(Kind);
+  if (Val) ID.AddInteger(Val);
+
+  void *InsertPoint;
+  AttributeImpl *PA = pImpl->AttrsSet.FindNodeOrInsertPos(ID, InsertPoint);
+
+  if (!PA) {
+    // If we didn't find any existing attributes of the same shape then create a
+    // new one and insert it.
+    if (!Val)
+      PA = new EnumAttributeImpl(Kind);
+    else
+      PA = new IntAttributeImpl(Kind, Val);
+    pImpl->AttrsSet.InsertNode(PA, InsertPoint);
+  }
+
+  // Return the Attribute that we found or created.
+  return Attribute(PA);
+}
+
+Attribute Attribute::get(LLVMContext &Context, StringRef Kind, StringRef Val) {
+  LLVMContextImpl *pImpl = Context.pImpl;
+  FoldingSetNodeID ID;
+  ID.AddString(Kind);
+  if (!Val.empty()) ID.AddString(Val);
+
+  void *InsertPoint;
+  AttributeImpl *PA = pImpl->AttrsSet.FindNodeOrInsertPos(ID, InsertPoint);
+
+  if (!PA) {
+    // If we didn't find any existing attributes of the same shape then create a
+    // new one and insert it.
+    PA = new StringAttributeImpl(Kind, Val);
+    pImpl->AttrsSet.InsertNode(PA, InsertPoint);
+  }
+
+  // Return the Attribute that we found or created.
+  return Attribute(PA);
+}
+
+Attribute Attribute::getWithAlignment(LLVMContext &Context, uint64_t Align) {
+  assert(isPowerOf2_32(Align) && "Alignment must be a power of two.");
+  assert(Align <= 0x40000000 && "Alignment too large.");
+  return get(Context, Alignment, Align);
+}
+
+Attribute Attribute::getWithStackAlignment(LLVMContext &Context,
+                                           uint64_t Align) {
+  assert(isPowerOf2_32(Align) && "Alignment must be a power of two.");
+  assert(Align <= 0x100 && "Alignment too large.");
+  return get(Context, StackAlignment, Align);
+}
+
+Attribute Attribute::getWithDereferenceableBytes(LLVMContext &Context,
+                                                uint64_t Bytes) {
+  assert(Bytes && "Bytes must be non-zero.");
+  return get(Context, Dereferenceable, Bytes);
+}
+
+Attribute Attribute::getWithDereferenceableOrNullBytes(LLVMContext &Context,
+                                                       uint64_t Bytes) {
+  assert(Bytes && "Bytes must be non-zero.");
+  return get(Context, DereferenceableOrNull, Bytes);
+}
+
+Attribute
+Attribute::getWithAllocSizeArgs(LLVMContext &Context, unsigned ElemSizeArg,
+                                const Optional<unsigned> &NumElemsArg) {
+  assert(!(ElemSizeArg == 0 && NumElemsArg && *NumElemsArg == 0) &&
+         "Invalid allocsize arguments -- given allocsize(0, 0)");
+  return get(Context, AllocSize, packAllocSizeArgs(ElemSizeArg, NumElemsArg));
+}
+
+//===----------------------------------------------------------------------===//
+// Attribute Accessor Methods
+//===----------------------------------------------------------------------===//
+
+bool Attribute::isEnumAttribute() const {
+  return pImpl && pImpl->isEnumAttribute();
+}
+
+bool Attribute::isIntAttribute() const {
+  return pImpl && pImpl->isIntAttribute();
+}
+
+bool Attribute::isStringAttribute() const {
+  return pImpl && pImpl->isStringAttribute();
+}
+
+Attribute::AttrKind Attribute::getKindAsEnum() const {
+  if (!pImpl) return None;
+  assert((isEnumAttribute() || isIntAttribute()) &&
+         "Invalid attribute type to get the kind as an enum!");
+  return pImpl->getKindAsEnum();
+}
+
+uint64_t Attribute::getValueAsInt() const {
+  if (!pImpl) return 0;
+  assert(isIntAttribute() &&
+         "Expected the attribute to be an integer attribute!");
+  return pImpl->getValueAsInt();
+}
+
+StringRef Attribute::getKindAsString() const {
+  if (!pImpl) return StringRef();
+  assert(isStringAttribute() &&
+         "Invalid attribute type to get the kind as a string!");
+  return pImpl->getKindAsString();
+}
+
+StringRef Attribute::getValueAsString() const {
+  if (!pImpl) return StringRef();
+  assert(isStringAttribute() &&
+         "Invalid attribute type to get the value as a string!");
+  return pImpl->getValueAsString();
+}
+
+bool Attribute::hasAttribute(AttrKind Kind) const {
+  return (pImpl && pImpl->hasAttribute(Kind)) || (!pImpl && Kind == None);
+}
+
+bool Attribute::hasAttribute(StringRef Kind) const {
+  if (!isStringAttribute()) return false;
+  return pImpl && pImpl->hasAttribute(Kind);
+}
+
+unsigned Attribute::getAlignment() const {
+  assert(hasAttribute(Attribute::Alignment) &&
+         "Trying to get alignment from non-alignment attribute!");
+  return pImpl->getValueAsInt();
+}
+
+unsigned Attribute::getStackAlignment() const {
+  assert(hasAttribute(Attribute::StackAlignment) &&
+         "Trying to get alignment from non-alignment attribute!");
+  return pImpl->getValueAsInt();
+}
+
+uint64_t Attribute::getDereferenceableBytes() const {
+  assert(hasAttribute(Attribute::Dereferenceable) &&
+         "Trying to get dereferenceable bytes from "
+         "non-dereferenceable attribute!");
+  return pImpl->getValueAsInt();
+}
+
+uint64_t Attribute::getDereferenceableOrNullBytes() const {
+  assert(hasAttribute(Attribute::DereferenceableOrNull) &&
+         "Trying to get dereferenceable bytes from "
+         "non-dereferenceable attribute!");
+  return pImpl->getValueAsInt();
+}
+
+std::pair<unsigned, Optional<unsigned>> Attribute::getAllocSizeArgs() const {
+  assert(hasAttribute(Attribute::AllocSize) &&
+         "Trying to get allocsize args from non-allocsize attribute");
+  return unpackAllocSizeArgs(pImpl->getValueAsInt());
+}
+
+std::string Attribute::getAsString(bool InAttrGrp) const {
+  if (!pImpl) return "";
+
+  if (hasAttribute(Attribute::SanitizeAddress))
+    return "sanitize_address";
+  if (hasAttribute(Attribute::AlwaysInline))
+    return "alwaysinline";
+  if (hasAttribute(Attribute::ArgMemOnly))
+    return "argmemonly";
+  if (hasAttribute(Attribute::Builtin))
+    return "builtin";
+  if (hasAttribute(Attribute::ByVal))
+    return "byval";
+  if (hasAttribute(Attribute::Convergent))
+    return "convergent";
+  if (hasAttribute(Attribute::SwiftError))
+    return "swifterror";
+  if (hasAttribute(Attribute::SwiftSelf))
+    return "swiftself";
+  if (hasAttribute(Attribute::InaccessibleMemOnly))
+    return "inaccessiblememonly";
+  if (hasAttribute(Attribute::InaccessibleMemOrArgMemOnly))
+    return "inaccessiblemem_or_argmemonly";
+  if (hasAttribute(Attribute::InAlloca))
+    return "inalloca";
+  if (hasAttribute(Attribute::InlineHint))
+    return "inlinehint";
+  if (hasAttribute(Attribute::InReg))
+    return "inreg";
+  if (hasAttribute(Attribute::JumpTable))
+    return "jumptable";
+  if (hasAttribute(Attribute::MinSize))
+    return "minsize";
+  if (hasAttribute(Attribute::Naked))
+    return "naked";
+  if (hasAttribute(Attribute::Nest))
+    return "nest";
+  if (hasAttribute(Attribute::NoAlias))
+    return "noalias";
+  if (hasAttribute(Attribute::NoBuiltin))
+    return "nobuiltin";
+  if (hasAttribute(Attribute::NoCapture))
+    return "nocapture";
+  if (hasAttribute(Attribute::NoDuplicate))
+    return "noduplicate";
+  if (hasAttribute(Attribute::NoImplicitFloat))
+    return "noimplicitfloat";
+  if (hasAttribute(Attribute::NoInline))
+    return "noinline";
+  if (hasAttribute(Attribute::NonLazyBind))
+    return "nonlazybind";
+  if (hasAttribute(Attribute::NonNull))
+    return "nonnull";
+  if (hasAttribute(Attribute::NoRedZone))
+    return "noredzone";
+  if (hasAttribute(Attribute::NoReturn))
+    return "noreturn";
+  if (hasAttribute(Attribute::NoRecurse))
+    return "norecurse";
+  if (hasAttribute(Attribute::NoUnwind))
+    return "nounwind";
+  if (hasAttribute(Attribute::OptimizeNone))
+    return "optnone";
+  if (hasAttribute(Attribute::OptimizeForSize))
+    return "optsize";
+  if (hasAttribute(Attribute::ReadNone))
+    return "readnone";
+  if (hasAttribute(Attribute::ReadOnly))
+    return "readonly";
+  if (hasAttribute(Attribute::WriteOnly))
+    return "writeonly";
+  if (hasAttribute(Attribute::Returned))
+    return "returned";
+  if (hasAttribute(Attribute::ReturnsTwice))
+    return "returns_twice";
+  if (hasAttribute(Attribute::SExt))
+    return "signext";
+  if (hasAttribute(Attribute::StackProtect))
+    return "ssp";
+  if (hasAttribute(Attribute::StackProtectReq))
+    return "sspreq";
+  if (hasAttribute(Attribute::StackProtectStrong))
+    return "sspstrong";
+  if (hasAttribute(Attribute::SafeStack))
+    return "safestack";
+  if (hasAttribute(Attribute::StructRet))
+    return "sret";
+  if (hasAttribute(Attribute::SanitizeThread))
+    return "sanitize_thread";
+  if (hasAttribute(Attribute::SanitizeMemory))
+    return "sanitize_memory";
+  if (hasAttribute(Attribute::UWTable))
+    return "uwtable";
+  if (hasAttribute(Attribute::ZExt))
+    return "zeroext";
+  if (hasAttribute(Attribute::Cold))
+    return "cold";
+
+  // FIXME: These should be output like this:
+  //
+  //   align=4
+  //   alignstack=8
+  //
+  if (hasAttribute(Attribute::Alignment)) {
+    std::string Result;
+    Result += "align";
+    Result += (InAttrGrp) ? "=" : " ";
+    Result += utostr(getValueAsInt());
+    return Result;
+  }
+
+  auto AttrWithBytesToString = [&](const char *Name) {
+    std::string Result;
+    Result += Name;
+    if (InAttrGrp) {
+      Result += "=";
+      Result += utostr(getValueAsInt());
+    } else {
+      Result += "(";
+      Result += utostr(getValueAsInt());
+      Result += ")";
+    }
+    return Result;
+  };
+
+  if (hasAttribute(Attribute::StackAlignment))
+    return AttrWithBytesToString("alignstack");
+
+  if (hasAttribute(Attribute::Dereferenceable))
+    return AttrWithBytesToString("dereferenceable");
+
+  if (hasAttribute(Attribute::DereferenceableOrNull))
+    return AttrWithBytesToString("dereferenceable_or_null");
+
+  if (hasAttribute(Attribute::AllocSize)) {
+    unsigned ElemSize;
+    Optional<unsigned> NumElems;
+    std::tie(ElemSize, NumElems) = getAllocSizeArgs();
+
+    std::string Result = "allocsize(";
+    Result += utostr(ElemSize);
+    if (NumElems.hasValue()) {
+      Result += ',';
+      Result += utostr(*NumElems);
+    }
+    Result += ')';
+    return Result;
+  }
+
+  // Convert target-dependent attributes to strings of the form:
+  //
+  //   "kind"
+  //   "kind" = "value"
+  //
+  if (isStringAttribute()) {
+    std::string Result;
+    Result += (Twine('"') + getKindAsString() + Twine('"')).str();
+
+    std::string AttrVal = pImpl->getValueAsString();
+    if (AttrVal.empty()) return Result;
+
+    // Since some attribute strings contain special characters that are not
+    // printable, they have to be escaped to make the attribute value printable
+    // as-is, e.g. "\01__gnu_mcount_nc".
+    {
+      raw_string_ostream OS(Result);
+      OS << "=\"";
+      PrintEscapedString(AttrVal, OS);
+      OS << "\"";
+    }
+    return Result;
+  }
+
+  // VISC attributes for arguments
+  if (hasAttribute(Attribute::In))
+    return "in";
+  if (hasAttribute(Attribute::Out))
+    return "out";
+  if (hasAttribute(Attribute::InOut))
+    return "inout";
+
+  llvm_unreachable("Unknown attribute");
+}
+
+bool Attribute::operator<(Attribute A) const {
+  if (!pImpl && !A.pImpl) return false;
+  if (!pImpl) return true;
+  if (!A.pImpl) return false;
+  return *pImpl < *A.pImpl;
+}
+
+//===----------------------------------------------------------------------===//
+// AttributeImpl Definition
+//===----------------------------------------------------------------------===//
+
+// Pin the vtables to this file.
+AttributeImpl::~AttributeImpl() {}
+void EnumAttributeImpl::anchor() {}
+void IntAttributeImpl::anchor() {}
+void StringAttributeImpl::anchor() {}
+
+bool AttributeImpl::hasAttribute(Attribute::AttrKind A) const {
+  if (isStringAttribute()) return false;
+  return getKindAsEnum() == A;
+}
+
+bool AttributeImpl::hasAttribute(StringRef Kind) const {
+  if (!isStringAttribute()) return false;
+  return getKindAsString() == Kind;
+}
+
+Attribute::AttrKind AttributeImpl::getKindAsEnum() const {
+  assert(isEnumAttribute() || isIntAttribute());
+  return static_cast<const EnumAttributeImpl *>(this)->getEnumKind();
+}
+
+uint64_t AttributeImpl::getValueAsInt() const {
+  assert(isIntAttribute());
+  return static_cast<const IntAttributeImpl *>(this)->getValue();
+}
+
+StringRef AttributeImpl::getKindAsString() const {
+  assert(isStringAttribute());
+  return static_cast<const StringAttributeImpl *>(this)->getStringKind();
+}
+
+StringRef AttributeImpl::getValueAsString() const {
+  assert(isStringAttribute());
+  return static_cast<const StringAttributeImpl *>(this)->getStringValue();
+}
+
+bool AttributeImpl::operator<(const AttributeImpl &AI) const {
+  // This sorts the attributes with Attribute::AttrKinds coming first (sorted
+  // relative to their enum value) and then strings.
+  if (isEnumAttribute()) {
+    if (AI.isEnumAttribute()) return getKindAsEnum() < AI.getKindAsEnum();
+    if (AI.isIntAttribute()) return true;
+    if (AI.isStringAttribute()) return true;
+  }
+
+  if (isIntAttribute()) {
+    if (AI.isEnumAttribute()) return false;
+    if (AI.isIntAttribute()) {
+      if (getKindAsEnum() == AI.getKindAsEnum())
+        return getValueAsInt() < AI.getValueAsInt();
+      return getKindAsEnum() < AI.getKindAsEnum();
+    }
+    if (AI.isStringAttribute()) return true;
+  }
+
+  if (AI.isEnumAttribute()) return false;
+  if (AI.isIntAttribute()) return false;
+  if (getKindAsString() == AI.getKindAsString())
+    return getValueAsString() < AI.getValueAsString();
+  return getKindAsString() < AI.getKindAsString();
+}
+
+//===----------------------------------------------------------------------===//
+// AttributeSetNode Definition
+//===----------------------------------------------------------------------===//
+
+AttributeSetNode *AttributeSetNode::get(LLVMContext &C,
+                                        ArrayRef<Attribute> Attrs) {
+  if (Attrs.empty())
+    return nullptr;
+
+  // Otherwise, build a key to look up the existing attributes.
+  LLVMContextImpl *pImpl = C.pImpl;
+  FoldingSetNodeID ID;
+
+  SmallVector<Attribute, 8> SortedAttrs(Attrs.begin(), Attrs.end());
+  std::sort(SortedAttrs.begin(), SortedAttrs.end());
+
+  for (Attribute Attr : SortedAttrs)
+    Attr.Profile(ID);
+
+  void *InsertPoint;
+  AttributeSetNode *PA =
+    pImpl->AttrsSetNodes.FindNodeOrInsertPos(ID, InsertPoint);
+
+  // If we didn't find any existing attributes of the same shape then create a
+  // new one and insert it.
+  if (!PA) {
+    // Co-allocate entries after the AttributeSetNode itself.
+    void *Mem = ::operator new(totalSizeToAlloc<Attribute>(SortedAttrs.size()));
+    PA = new (Mem) AttributeSetNode(SortedAttrs);
+    pImpl->AttrsSetNodes.InsertNode(PA, InsertPoint);
+  }
+
+  // Return the AttributesListNode that we found or created.
+  return PA;
+}
+
+bool AttributeSetNode::hasAttribute(StringRef Kind) const {
+  for (Attribute I : *this)
+    if (I.hasAttribute(Kind))
+      return true;
+  return false;
+}
+
+Attribute AttributeSetNode::getAttribute(Attribute::AttrKind Kind) const {
+  if (hasAttribute(Kind)) {
+    for (Attribute I : *this)
+      if (I.hasAttribute(Kind))
+        return I;
+  }
+  return Attribute();
+}
+
+Attribute AttributeSetNode::getAttribute(StringRef Kind) const {
+  for (Attribute I : *this)
+    if (I.hasAttribute(Kind))
+      return I;
+  return Attribute();
+}
+
+unsigned AttributeSetNode::getAlignment() const {
+  for (Attribute I : *this)
+    if (I.hasAttribute(Attribute::Alignment))
+      return I.getAlignment();
+  return 0;
+}
+
+unsigned AttributeSetNode::getStackAlignment() const {
+  for (Attribute I : *this)
+    if (I.hasAttribute(Attribute::StackAlignment))
+      return I.getStackAlignment();
+  return 0;
+}
+
+uint64_t AttributeSetNode::getDereferenceableBytes() const {
+  for (Attribute I : *this)
+    if (I.hasAttribute(Attribute::Dereferenceable))
+      return I.getDereferenceableBytes();
+  return 0;
+}
+
+uint64_t AttributeSetNode::getDereferenceableOrNullBytes() const {
+  for (Attribute I : *this)
+    if (I.hasAttribute(Attribute::DereferenceableOrNull))
+      return I.getDereferenceableOrNullBytes();
+  return 0;
+}
+
+std::pair<unsigned, Optional<unsigned>>
+AttributeSetNode::getAllocSizeArgs() const {
+  for (Attribute I : *this)
+    if (I.hasAttribute(Attribute::AllocSize))
+      return I.getAllocSizeArgs();
+  return std::make_pair(0, 0);
+}
+
+std::string AttributeSetNode::getAsString(bool InAttrGrp) const {
+  std::string Str;
+  for (iterator I = begin(), E = end(); I != E; ++I) {
+    if (I != begin())
+      Str += ' ';
+    Str += I->getAsString(InAttrGrp);
+  }
+  return Str;
+}
+
+//===----------------------------------------------------------------------===//
+// AttributeSetImpl Definition
+//===----------------------------------------------------------------------===//
+
+LLVM_DUMP_METHOD void AttributeSetImpl::dump() const {
+  AttributeSet(const_cast<AttributeSetImpl *>(this)).dump();
+}
+
+//===----------------------------------------------------------------------===//
+// AttributeSet Construction and Mutation Methods
+//===----------------------------------------------------------------------===//
+
+AttributeSet
+AttributeSet::getImpl(LLVMContext &C,
+                      ArrayRef<std::pair<unsigned, AttributeSetNode*> > Attrs) {
+  LLVMContextImpl *pImpl = C.pImpl;
+  FoldingSetNodeID ID;
+  AttributeSetImpl::Profile(ID, Attrs);
+
+  void *InsertPoint;
+  AttributeSetImpl *PA = pImpl->AttrsLists.FindNodeOrInsertPos(ID, InsertPoint);
+
+  // If we didn't find any existing attributes of the same shape then
+  // create a new one and insert it.
+  if (!PA) {
+    // Co-allocate entries after the AttributeSetImpl itself.
+    void *Mem = ::operator new(
+        AttributeSetImpl::totalSizeToAlloc<IndexAttrPair>(Attrs.size()));
+    PA = new (Mem) AttributeSetImpl(C, Attrs);
+    pImpl->AttrsLists.InsertNode(PA, InsertPoint);
+  }
+
+  // Return the AttributesList that we found or created.
+  return AttributeSet(PA);
+}
+
+AttributeSet AttributeSet::get(LLVMContext &C,
+                               ArrayRef<std::pair<unsigned, Attribute> > Attrs){
+  // If there are no attributes then return a null AttributesList pointer.
+  if (Attrs.empty())
+    return AttributeSet();
+
+  assert(std::is_sorted(Attrs.begin(), Attrs.end(),
+                        [](const std::pair<unsigned, Attribute> &LHS,
+                           const std::pair<unsigned, Attribute> &RHS) {
+                          return LHS.first < RHS.first;
+                        }) && "Misordered Attributes list!");
+  assert(none_of(Attrs,
+                 [](const std::pair<unsigned, Attribute> &Pair) {
+                   return Pair.second.hasAttribute(Attribute::None);
+                 }) &&
+         "Pointless attribute!");
+
+  // Create a vector of (unsigned, AttributeSetNode*) pairs from the attributes
+  // list.
+  SmallVector<std::pair<unsigned, AttributeSetNode*>, 8> AttrPairVec;
+  for (ArrayRef<std::pair<unsigned, Attribute> >::iterator I = Attrs.begin(),
+         E = Attrs.end(); I != E; ) {
+    unsigned Index = I->first;
+    SmallVector<Attribute, 4> AttrVec;
+    while (I != E && I->first == Index) {
+      AttrVec.push_back(I->second);
+      ++I;
+    }
+
+    AttrPairVec.emplace_back(Index, AttributeSetNode::get(C, AttrVec));
+  }
+
+  return getImpl(C, AttrPairVec);
+}
+
+AttributeSet AttributeSet::get(LLVMContext &C,
+                               ArrayRef<std::pair<unsigned,
+                                                  AttributeSetNode*> > Attrs) {
+  // If there are no attributes then return a null AttributesList pointer.
+  if (Attrs.empty())
+    return AttributeSet();
+
+  return getImpl(C, Attrs);
+}
+
+AttributeSet AttributeSet::get(LLVMContext &C, unsigned Index,
+                               const AttrBuilder &B) {
+  if (!B.hasAttributes())
+    return AttributeSet();
+
+  // Add target-independent attributes.
+  SmallVector<std::pair<unsigned, Attribute>, 8> Attrs;
+  for (Attribute::AttrKind Kind = Attribute::None;
+       Kind != Attribute::EndAttrKinds; Kind = Attribute::AttrKind(Kind + 1)) {
+    if (!B.contains(Kind))
+      continue;
+
+    Attribute Attr;
+    switch (Kind) {
+    case Attribute::Alignment:
+      Attr = Attribute::getWithAlignment(C, B.getAlignment());
+      break;
+    case Attribute::StackAlignment:
+      Attr = Attribute::getWithStackAlignment(C, B.getStackAlignment());
+      break;
+    case Attribute::Dereferenceable:
+      Attr = Attribute::getWithDereferenceableBytes(
+          C, B.getDereferenceableBytes());
+      break;
+    case Attribute::DereferenceableOrNull:
+      Attr = Attribute::getWithDereferenceableOrNullBytes(
+          C, B.getDereferenceableOrNullBytes());
+      break;
+    case Attribute::AllocSize: {
+      auto A = B.getAllocSizeArgs();
+      Attr = Attribute::getWithAllocSizeArgs(C, A.first, A.second);
+      break;
+    }
+    default:
+      Attr = Attribute::get(C, Kind);
+    }
+    Attrs.emplace_back(Index, Attr);
+  }
+
+  // Add target-dependent (string) attributes.
+  for (const auto &TDA : B.td_attrs())
+    Attrs.emplace_back(Index, Attribute::get(C, TDA.first, TDA.second));
+
+  return get(C, Attrs);
+}
+
+AttributeSet AttributeSet::get(LLVMContext &C, unsigned Index,
+                               ArrayRef<Attribute::AttrKind> Kinds) {
+  SmallVector<std::pair<unsigned, Attribute>, 8> Attrs;
+  for (Attribute::AttrKind K : Kinds)
+    Attrs.emplace_back(Index, Attribute::get(C, K));
+  return get(C, Attrs);
+}
+
+AttributeSet AttributeSet::get(LLVMContext &C, unsigned Index,
+                               ArrayRef<StringRef> Kinds) {
+  SmallVector<std::pair<unsigned, Attribute>, 8> Attrs;
+  for (StringRef K : Kinds)
+    Attrs.emplace_back(Index, Attribute::get(C, K));
+  return get(C, Attrs);
+}
+
+AttributeSet AttributeSet::get(LLVMContext &C, ArrayRef<AttributeSet> Attrs) {
+  if (Attrs.empty()) return AttributeSet();
+  if (Attrs.size() == 1) return Attrs[0];
+
+  SmallVector<std::pair<unsigned, AttributeSetNode*>, 8> AttrNodeVec;
+  AttributeSetImpl *A0 = Attrs[0].pImpl;
+  if (A0)
+    AttrNodeVec.append(A0->getNode(0), A0->getNode(A0->getNumSlots()));
+  // Copy all attributes from Attrs into AttrNodeVec while keeping AttrNodeVec
+  // ordered by index.  Because we know that each list in Attrs is ordered by
+  // index we only need to merge each successive list in rather than doing a
+  // full sort.
+  for (unsigned I = 1, E = Attrs.size(); I != E; ++I) {
+    AttributeSetImpl *AS = Attrs[I].pImpl;
+    if (!AS) continue;
+    SmallVector<std::pair<unsigned, AttributeSetNode *>, 8>::iterator
+      ANVI = AttrNodeVec.begin(), ANVE;
+    for (const IndexAttrPair *AI = AS->getNode(0),
+                             *AE = AS->getNode(AS->getNumSlots());
+         AI != AE; ++AI) {
+      ANVE = AttrNodeVec.end();
+      while (ANVI != ANVE && ANVI->first <= AI->first)
+        ++ANVI;
+      ANVI = AttrNodeVec.insert(ANVI, *AI) + 1;
+    }
+  }
+
+  return getImpl(C, AttrNodeVec);
+}
+
+AttributeSet AttributeSet::addAttribute(LLVMContext &C, unsigned Index,
+                                        Attribute::AttrKind Kind) const {
+  if (hasAttribute(Index, Kind)) return *this;
+  return addAttributes(C, Index, AttributeSet::get(C, Index, Kind));
+}
+
+AttributeSet AttributeSet::addAttribute(LLVMContext &C, unsigned Index,
+                                        StringRef Kind, StringRef Value) const {
+  llvm::AttrBuilder B;
+  B.addAttribute(Kind, Value);
+  return addAttributes(C, Index, AttributeSet::get(C, Index, B));
+}
+
+AttributeSet AttributeSet::addAttribute(LLVMContext &C,
+                                        ArrayRef<unsigned> Indices,
+                                        Attribute A) const {
+  unsigned I = 0, E = pImpl ? pImpl->getNumSlots() : 0;
+  auto IdxI = Indices.begin(), IdxE = Indices.end();
+  SmallVector<AttributeSet, 4> AttrSet;
+
+  while (I != E && IdxI != IdxE) {
+    if (getSlotIndex(I) < *IdxI)
+      AttrSet.emplace_back(getSlotAttributes(I++));
+    else if (getSlotIndex(I) > *IdxI)
+      AttrSet.emplace_back(AttributeSet::get(C, std::make_pair(*IdxI++, A)));
+    else {
+      AttrBuilder B(getSlotAttributes(I), *IdxI);
+      B.addAttribute(A);
+      AttrSet.emplace_back(AttributeSet::get(C, *IdxI, B));
+      ++I;
+      ++IdxI;
+    }
+  }
+
+  while (I != E)
+    AttrSet.emplace_back(getSlotAttributes(I++));
+
+  while (IdxI != IdxE)
+    AttrSet.emplace_back(AttributeSet::get(C, std::make_pair(*IdxI++, A)));
+
+  return get(C, AttrSet);
+}
+
+AttributeSet AttributeSet::addAttributes(LLVMContext &C, unsigned Index,
+                                         AttributeSet Attrs) const {
+  if (!pImpl) return Attrs;
+  if (!Attrs.pImpl) return *this;
+
+#ifndef NDEBUG
+  // FIXME it is not obvious how this should work for alignment. For now, say
+  // we can't change a known alignment.
+  unsigned OldAlign = getParamAlignment(Index);
+  unsigned NewAlign = Attrs.getParamAlignment(Index);
+  assert((!OldAlign || !NewAlign || OldAlign == NewAlign) &&
+         "Attempt to change alignment!");
+#endif
+
+  // Add the attribute slots before the one we're trying to add.
+  SmallVector<AttributeSet, 4> AttrSet;
+  uint64_t NumAttrs = pImpl->getNumSlots();
+  AttributeSet AS;
+  uint64_t LastIndex = 0;
+  for (unsigned I = 0, E = NumAttrs; I != E; ++I) {
+    if (getSlotIndex(I) >= Index) {
+      if (getSlotIndex(I) == Index) AS = getSlotAttributes(LastIndex++);
+      break;
+    }
+    LastIndex = I + 1;
+    AttrSet.push_back(getSlotAttributes(I));
+  }
+
+  // Now add the attribute into the correct slot. There may already be an
+  // AttributeSet there.
+  AttrBuilder B(AS, Index);
+
+  for (unsigned I = 0, E = Attrs.pImpl->getNumSlots(); I != E; ++I)
+    if (Attrs.getSlotIndex(I) == Index) {
+      for (AttributeSetImpl::iterator II = Attrs.pImpl->begin(I),
+             IE = Attrs.pImpl->end(I); II != IE; ++II)
+        B.addAttribute(*II);
+      break;
+    }
+
+  AttrSet.push_back(AttributeSet::get(C, Index, B));
+
+  // Add the remaining attribute slots.
+  for (unsigned I = LastIndex, E = NumAttrs; I < E; ++I)
+    AttrSet.push_back(getSlotAttributes(I));
+
+  return get(C, AttrSet);
+}
+
+AttributeSet AttributeSet::removeAttribute(LLVMContext &C, unsigned Index,
+                                           Attribute::AttrKind Kind) const {
+  if (!hasAttribute(Index, Kind)) return *this;
+  return removeAttributes(C, Index, AttributeSet::get(C, Index, Kind));
+}
+
+AttributeSet AttributeSet::removeAttribute(LLVMContext &C, unsigned Index,
+                                           StringRef Kind) const {
+  if (!hasAttribute(Index, Kind)) return *this;
+  return removeAttributes(C, Index, AttributeSet::get(C, Index, Kind));
+}
+
+AttributeSet AttributeSet::removeAttributes(LLVMContext &C, unsigned Index,
+                                            AttributeSet Attrs) const {
+  if (!pImpl) return AttributeSet();
+  if (!Attrs.pImpl) return *this;
+
+  // FIXME it is not obvious how this should work for alignment.
+  // For now, say we can't pass in alignment, which no current use does.
+  assert(!Attrs.hasAttribute(Index, Attribute::Alignment) &&
+         "Attempt to change alignment!");
+
+  // Add the attribute slots before the one we're trying to add.
+  SmallVector<AttributeSet, 4> AttrSet;
+  uint64_t NumAttrs = pImpl->getNumSlots();
+  AttributeSet AS;
+  uint64_t LastIndex = 0;
+  for (unsigned I = 0, E = NumAttrs; I != E; ++I) {
+    if (getSlotIndex(I) >= Index) {
+      if (getSlotIndex(I) == Index) AS = getSlotAttributes(LastIndex++);
+      break;
+    }
+    LastIndex = I + 1;
+    AttrSet.push_back(getSlotAttributes(I));
+  }
+
+  // Now remove the attribute from the correct slot. There may already be an
+  // AttributeSet there.
+  AttrBuilder B(AS, Index);
+
+  for (unsigned I = 0, E = Attrs.pImpl->getNumSlots(); I != E; ++I)
+    if (Attrs.getSlotIndex(I) == Index) {
+      B.removeAttributes(Attrs.pImpl->getSlotAttributes(I), Index);
+      break;
+    }
+
+  AttrSet.push_back(AttributeSet::get(C, Index, B));
+
+  // Add the remaining attribute slots.
+  for (unsigned I = LastIndex, E = NumAttrs; I < E; ++I)
+    AttrSet.push_back(getSlotAttributes(I));
+
+  return get(C, AttrSet);
+}
+
+AttributeSet AttributeSet::removeAttributes(LLVMContext &C, unsigned Index,
+                                            const AttrBuilder &Attrs) const {
+  if (!pImpl) return AttributeSet();
+
+  // FIXME it is not obvious how this should work for alignment.
+  // For now, say we can't pass in alignment, which no current use does.
+  assert(!Attrs.hasAlignmentAttr() && "Attempt to change alignment!");
+
+  // Add the attribute slots before the one we're trying to add.
+  SmallVector<AttributeSet, 4> AttrSet;
+  uint64_t NumAttrs = pImpl->getNumSlots();
+  AttributeSet AS;
+  uint64_t LastIndex = 0;
+  for (unsigned I = 0, E = NumAttrs; I != E; ++I) {
+    if (getSlotIndex(I) >= Index) {
+      if (getSlotIndex(I) == Index) AS = getSlotAttributes(LastIndex++);
+      break;
+    }
+    LastIndex = I + 1;
+    AttrSet.push_back(getSlotAttributes(I));
+  }
+
+  // Now remove the attribute from the correct slot. There may already be an
+  // AttributeSet there.
+  AttrBuilder B(AS, Index);
+  B.remove(Attrs);
+
+  AttrSet.push_back(AttributeSet::get(C, Index, B));
+
+  // Add the remaining attribute slots.
+  for (unsigned I = LastIndex, E = NumAttrs; I < E; ++I)
+    AttrSet.push_back(getSlotAttributes(I));
+
+  return get(C, AttrSet);
+}
+
+AttributeSet AttributeSet::addDereferenceableAttr(LLVMContext &C, unsigned Index,
+                                                  uint64_t Bytes) const {
+  llvm::AttrBuilder B;
+  B.addDereferenceableAttr(Bytes);
+  return addAttributes(C, Index, AttributeSet::get(C, Index, B));
+}
+
+AttributeSet AttributeSet::addDereferenceableOrNullAttr(LLVMContext &C,
+                                                        unsigned Index,
+                                                        uint64_t Bytes) const {
+  llvm::AttrBuilder B;
+  B.addDereferenceableOrNullAttr(Bytes);
+  return addAttributes(C, Index, AttributeSet::get(C, Index, B));
+}
+
+AttributeSet
+AttributeSet::addAllocSizeAttr(LLVMContext &C, unsigned Index,
+                               unsigned ElemSizeArg,
+                               const Optional<unsigned> &NumElemsArg) {
+  llvm::AttrBuilder B;
+  B.addAllocSizeAttr(ElemSizeArg, NumElemsArg);
+  return addAttributes(C, Index, AttributeSet::get(C, Index, B));
+}
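+
+// Illustrative sketch (hypothetical): each add*Attr helper above wraps one
+// integer attribute in a single-slot set and merges it in, e.g.
+//   AttributeSet AS2 = AS.addDereferenceableAttr(C, 1, 8);
+//   AttributeSet AS3 = AS2.addAllocSizeAttr(C, AttributeSet::FunctionIndex,
+//                                           0, Optional<unsigned>(1));
+// which is equivalent to building an AttrBuilder by hand and calling
+// addAttributes() as the bodies above do.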
+
+//===----------------------------------------------------------------------===//
+// AttributeSet Accessor Methods
+//===----------------------------------------------------------------------===//
+
+LLVMContext &AttributeSet::getContext() const {
+  return pImpl->getContext();
+}
+
+AttributeSet AttributeSet::getParamAttributes(unsigned Index) const {
+  return pImpl && hasAttributes(Index) ?
+    AttributeSet::get(pImpl->getContext(),
+                      ArrayRef<std::pair<unsigned, AttributeSetNode*> >(
+                        std::make_pair(Index, getAttributes(Index)))) :
+    AttributeSet();
+}
+
+AttributeSet AttributeSet::getRetAttributes() const {
+  return pImpl && hasAttributes(ReturnIndex) ?
+    AttributeSet::get(pImpl->getContext(),
+                      ArrayRef<std::pair<unsigned, AttributeSetNode*> >(
+                        std::make_pair(ReturnIndex,
+                                       getAttributes(ReturnIndex)))) :
+    AttributeSet();
+}
+
+AttributeSet AttributeSet::getFnAttributes() const {
+  return pImpl && hasAttributes(FunctionIndex) ?
+    AttributeSet::get(pImpl->getContext(),
+                      ArrayRef<std::pair<unsigned, AttributeSetNode*> >(
+                        std::make_pair(FunctionIndex,
+                                       getAttributes(FunctionIndex)))) :
+    AttributeSet();
+}
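+
+// Illustrative sketch (hypothetical): these accessors slice a full attribute
+// list into single-slot sets, e.g. for some attribute list PAL:
+//   AttributeSet FnAttrs  = PAL.getFnAttributes();      // FunctionIndex slot
+//   AttributeSet RetAttrs = PAL.getRetAttributes();     // ReturnIndex slot
+//   AttributeSet Arg1     = PAL.getParamAttributes(1);  // first parameter
+// Each result is empty when the corresponding slot has no attributes.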
+
+bool AttributeSet::hasAttribute(unsigned Index, Attribute::AttrKind Kind) const{
+  AttributeSetNode *ASN = getAttributes(Index);
+  return ASN && ASN->hasAttribute(Kind);
+}
+
+bool AttributeSet::hasAttribute(unsigned Index, StringRef Kind) const {
+  AttributeSetNode *ASN = getAttributes(Index);
+  return ASN && ASN->hasAttribute(Kind);
+}
+
+bool AttributeSet::hasAttributes(unsigned Index) const {
+  AttributeSetNode *ASN = getAttributes(Index);
+  return ASN && ASN->hasAttributes();
+}
+
+bool AttributeSet::hasFnAttribute(Attribute::AttrKind Kind) const {
+  return pImpl && pImpl->hasFnAttribute(Kind);
+}
+
+bool AttributeSet::hasFnAttribute(StringRef Kind) const {
+  return hasAttribute(AttributeSet::FunctionIndex, Kind);
+}
+
+bool AttributeSet::hasAttrSomewhere(Attribute::AttrKind Attr,
+                                    unsigned *Index) const {
+  if (!pImpl) return false;
+
+  for (unsigned I = 0, E = pImpl->getNumSlots(); I != E; ++I)
+    for (AttributeSetImpl::iterator II = pImpl->begin(I),
+           IE = pImpl->end(I); II != IE; ++II)
+      if (II->hasAttribute(Attr)) {
+        if (Index) *Index = pImpl->getSlotIndex(I);
+        return true;
+      }
+
+  return false;
+}
+
+Attribute AttributeSet::getAttribute(unsigned Index,
+                                     Attribute::AttrKind Kind) const {
+  AttributeSetNode *ASN = getAttributes(Index);
+  return ASN ? ASN->getAttribute(Kind) : Attribute();
+}
+
+Attribute AttributeSet::getAttribute(unsigned Index,
+                                     StringRef Kind) const {
+  AttributeSetNode *ASN = getAttributes(Index);
+  return ASN ? ASN->getAttribute(Kind) : Attribute();
+}
+
+unsigned AttributeSet::getParamAlignment(unsigned Index) const {
+  AttributeSetNode *ASN = getAttributes(Index);
+  return ASN ? ASN->getAlignment() : 0;
+}
+
+unsigned AttributeSet::getStackAlignment(unsigned Index) const {
+  AttributeSetNode *ASN = getAttributes(Index);
+  return ASN ? ASN->getStackAlignment() : 0;
+}
+
+uint64_t AttributeSet::getDereferenceableBytes(unsigned Index) const {
+  AttributeSetNode *ASN = getAttributes(Index);
+  return ASN ? ASN->getDereferenceableBytes() : 0;
+}
+
+uint64_t AttributeSet::getDereferenceableOrNullBytes(unsigned Index) const {
+  AttributeSetNode *ASN = getAttributes(Index);
+  return ASN ? ASN->getDereferenceableOrNullBytes() : 0;
+}
+
+std::pair<unsigned, Optional<unsigned>>
+AttributeSet::getAllocSizeArgs(unsigned Index) const {
+  AttributeSetNode *ASN = getAttributes(Index);
+  return ASN ? ASN->getAllocSizeArgs() : std::make_pair(0u, Optional<unsigned>(0u));
+}
+
+std::string AttributeSet::getAsString(unsigned Index, bool InAttrGrp) const {
+  AttributeSetNode *ASN = getAttributes(Index);
+  return ASN ? ASN->getAsString(InAttrGrp) : std::string("");
+}
+
+AttributeSetNode *AttributeSet::getAttributes(unsigned Index) const {
+  if (!pImpl) return nullptr;
+
+  // Loop through to find the attribute node we want.
+  for (unsigned I = 0, E = pImpl->getNumSlots(); I != E; ++I)
+    if (pImpl->getSlotIndex(I) == Index)
+      return pImpl->getSlotNode(I);
+
+  return nullptr;
+}
+
+AttributeSet::iterator AttributeSet::begin(unsigned Slot) const {
+  if (!pImpl)
+    return ArrayRef<Attribute>().begin();
+  return pImpl->begin(Slot);
+}
+
+AttributeSet::iterator AttributeSet::end(unsigned Slot) const {
+  if (!pImpl)
+    return ArrayRef<Attribute>().end();
+  return pImpl->end(Slot);
+}
+
+//===----------------------------------------------------------------------===//
+// AttributeSet Introspection Methods
+//===----------------------------------------------------------------------===//
+
+unsigned AttributeSet::getNumSlots() const {
+  return pImpl ? pImpl->getNumSlots() : 0;
+}
+
+unsigned AttributeSet::getSlotIndex(unsigned Slot) const {
+  assert(pImpl && Slot < pImpl->getNumSlots() &&
+         "Slot # out of range!");
+  return pImpl->getSlotIndex(Slot);
+}
+
+AttributeSet AttributeSet::getSlotAttributes(unsigned Slot) const {
+  assert(pImpl && Slot < pImpl->getNumSlots() &&
+         "Slot # out of range!");
+  return pImpl->getSlotAttributes(Slot);
+}
+
+LLVM_DUMP_METHOD void AttributeSet::dump() const {
+  dbgs() << "PAL[\n";
+
+  for (unsigned i = 0, e = getNumSlots(); i < e; ++i) {
+    uint64_t Index = getSlotIndex(i);
+    dbgs() << "  { ";
+    if (Index == ~0U)
+      dbgs() << "~0U";
+    else
+      dbgs() << Index;
+    dbgs() << " => " << getAsString(Index) << " }\n";
+  }
+
+  dbgs() << "]\n";
+}
+
+//===----------------------------------------------------------------------===//
+// AttrBuilder Method Implementations
+//===----------------------------------------------------------------------===//
+
+AttrBuilder::AttrBuilder(AttributeSet AS, unsigned Index)
+    : Attrs(0), Alignment(0), StackAlignment(0), DerefBytes(0),
+      DerefOrNullBytes(0), AllocSizeArgs(0) {
+  AttributeSetImpl *pImpl = AS.pImpl;
+  if (!pImpl) return;
+
+  for (unsigned I = 0, E = pImpl->getNumSlots(); I != E; ++I) {
+    if (pImpl->getSlotIndex(I) != Index) continue;
+
+    for (AttributeSetImpl::iterator II = pImpl->begin(I),
+           IE = pImpl->end(I); II != IE; ++II)
+      addAttribute(*II);
+
+    break;
+  }
+}
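+
+// Illustrative sketch (hypothetical): this constructor harvests exactly one
+// slot, so
+//   AttrBuilder B(PAL, AttributeSet::FunctionIndex);
+// yields a builder holding only PAL's function-level attributes, which can be
+// edited and turned back into a set via AttributeSet::get(C, Index, B).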
+
+void AttrBuilder::clear() {
+  Attrs.reset();
+  TargetDepAttrs.clear();
+  Alignment = StackAlignment = DerefBytes = DerefOrNullBytes = 0;
+  AllocSizeArgs = 0;
+}
+
+AttrBuilder &AttrBuilder::addAttribute(Attribute::AttrKind Val) {
+  assert((unsigned)Val < Attribute::EndAttrKinds && "Attribute out of range!");
+  assert(Val != Attribute::Alignment && Val != Attribute::StackAlignment &&
+         Val != Attribute::Dereferenceable && Val != Attribute::AllocSize &&
+         "Adding integer attribute without adding a value!");
+  Attrs[Val] = true;
+  return *this;
+}
+
+AttrBuilder &AttrBuilder::addAttribute(Attribute Attr) {
+  if (Attr.isStringAttribute()) {
+    addAttribute(Attr.getKindAsString(), Attr.getValueAsString());
+    return *this;
+  }
+
+  Attribute::AttrKind Kind = Attr.getKindAsEnum();
+  Attrs[Kind] = true;
+
+  if (Kind == Attribute::Alignment)
+    Alignment = Attr.getAlignment();
+  else if (Kind == Attribute::StackAlignment)
+    StackAlignment = Attr.getStackAlignment();
+  else if (Kind == Attribute::Dereferenceable)
+    DerefBytes = Attr.getDereferenceableBytes();
+  else if (Kind == Attribute::DereferenceableOrNull)
+    DerefOrNullBytes = Attr.getDereferenceableOrNullBytes();
+  else if (Kind == Attribute::AllocSize)
+    AllocSizeArgs = Attr.getValueAsInt();
+  return *this;
+}
+
+AttrBuilder &AttrBuilder::addAttribute(StringRef A, StringRef V) {
+  TargetDepAttrs[A] = V;
+  return *this;
+}
+
+AttrBuilder &AttrBuilder::removeAttribute(Attribute::AttrKind Val) {
+  assert((unsigned)Val < Attribute::EndAttrKinds && "Attribute out of range!");
+  Attrs[Val] = false;
+
+  if (Val == Attribute::Alignment)
+    Alignment = 0;
+  else if (Val == Attribute::StackAlignment)
+    StackAlignment = 0;
+  else if (Val == Attribute::Dereferenceable)
+    DerefBytes = 0;
+  else if (Val == Attribute::DereferenceableOrNull)
+    DerefOrNullBytes = 0;
+  else if (Val == Attribute::AllocSize)
+    AllocSizeArgs = 0;
+
+  return *this;
+}
+
+AttrBuilder &AttrBuilder::removeAttributes(AttributeSet A, uint64_t Index) {
+  unsigned Slot = ~0U;
+  for (unsigned I = 0, E = A.getNumSlots(); I != E; ++I)
+    if (A.getSlotIndex(I) == Index) {
+      Slot = I;
+      break;
+    }
+
+  assert(Slot != ~0U && "Couldn't find index in AttributeSet!");
+
+  for (AttributeSet::iterator I = A.begin(Slot), E = A.end(Slot); I != E; ++I) {
+    Attribute Attr = *I;
+    if (Attr.isEnumAttribute() || Attr.isIntAttribute()) {
+      removeAttribute(Attr.getKindAsEnum());
+    } else {
+      assert(Attr.isStringAttribute() && "Invalid attribute type!");
+      removeAttribute(Attr.getKindAsString());
+    }
+  }
+
+  return *this;
+}
+
+AttrBuilder &AttrBuilder::removeAttribute(StringRef A) {
+  std::map<std::string, std::string>::iterator I = TargetDepAttrs.find(A);
+  if (I != TargetDepAttrs.end())
+    TargetDepAttrs.erase(I);
+  return *this;
+}
+
+std::pair<unsigned, Optional<unsigned>> AttrBuilder::getAllocSizeArgs() const {
+  return unpackAllocSizeArgs(AllocSizeArgs);
+}
+
+AttrBuilder &AttrBuilder::addAlignmentAttr(unsigned Align) {
+  if (Align == 0) return *this;
+
+  assert(isPowerOf2_32(Align) && "Alignment must be a power of two.");
+  assert(Align <= 0x40000000 && "Alignment too large.");
+
+  Attrs[Attribute::Alignment] = true;
+  Alignment = Align;
+  return *this;
+}
+
+AttrBuilder &AttrBuilder::addStackAlignmentAttr(unsigned Align) {
+  // Default alignment, allow the target to define how to align it.
+  if (Align == 0) return *this;
+
+  assert(isPowerOf2_32(Align) && "Alignment must be a power of two.");
+  assert(Align <= 0x100 && "Alignment too large.");
+
+  Attrs[Attribute::StackAlignment] = true;
+  StackAlignment = Align;
+  return *this;
+}
+
+AttrBuilder &AttrBuilder::addDereferenceableAttr(uint64_t Bytes) {
+  if (Bytes == 0) return *this;
+
+  Attrs[Attribute::Dereferenceable] = true;
+  DerefBytes = Bytes;
+  return *this;
+}
+
+AttrBuilder &AttrBuilder::addDereferenceableOrNullAttr(uint64_t Bytes) {
+  if (Bytes == 0)
+    return *this;
+
+  Attrs[Attribute::DereferenceableOrNull] = true;
+  DerefOrNullBytes = Bytes;
+  return *this;
+}
+
+AttrBuilder &AttrBuilder::addAllocSizeAttr(unsigned ElemSize,
+                                           const Optional<unsigned> &NumElems) {
+  return addAllocSizeAttrFromRawRepr(packAllocSizeArgs(ElemSize, NumElems));
+}
+
+AttrBuilder &AttrBuilder::addAllocSizeAttrFromRawRepr(uint64_t RawArgs) {
+  // (0, 0) is our "not present" value, so we need to check for it here.
+  assert(RawArgs && "Invalid allocsize arguments -- given allocsize(0, 0)");
+
+  Attrs[Attribute::AllocSize] = true;
+  // Reuse existing machinery to store this as a single 64-bit integer so we can
+  // save a few bytes over using a pair<unsigned, Optional<unsigned>>.
+  AllocSizeArgs = RawArgs;
+  return *this;
+}
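+
+// Illustrative note (hypothetical, relying on the pack/unpack helpers defined
+// earlier in this file): the arguments round-trip through the packed form, so
+//   B.addAllocSizeAttr(4, Optional<unsigned>(2));
+//   B.getAllocSizeArgs();   // yields {4, Optional<unsigned>(2)}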
+
+AttrBuilder &AttrBuilder::merge(const AttrBuilder &B) {
+  // FIXME: What if both have alignments, but they don't match?!
+  if (!Alignment)
+    Alignment = B.Alignment;
+
+  if (!StackAlignment)
+    StackAlignment = B.StackAlignment;
+
+  if (!DerefBytes)
+    DerefBytes = B.DerefBytes;
+
+  if (!DerefOrNullBytes)
+    DerefOrNullBytes = B.DerefOrNullBytes;
+
+  if (!AllocSizeArgs)
+    AllocSizeArgs = B.AllocSizeArgs;
+
+  Attrs |= B.Attrs;
+
+  for (auto I : B.td_attrs())
+    TargetDepAttrs[I.first] = I.second;
+
+  return *this;
+}
+
+AttrBuilder &AttrBuilder::remove(const AttrBuilder &B) {
+  // FIXME: What if both have alignments, but they don't match?!
+  if (B.Alignment)
+    Alignment = 0;
+
+  if (B.StackAlignment)
+    StackAlignment = 0;
+
+  if (B.DerefBytes)
+    DerefBytes = 0;
+
+  if (B.DerefOrNullBytes)
+    DerefOrNullBytes = 0;
+
+  if (B.AllocSizeArgs)
+    AllocSizeArgs = 0;
+
+  Attrs &= ~B.Attrs;
+
+  for (auto I : B.td_attrs())
+    TargetDepAttrs.erase(I.first);
+
+  return *this;
+}
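+
+// Illustrative sketch (hypothetical): merge() and remove() treat the integer
+// attributes asymmetrically: merge() keeps an existing alignment rather than
+// overwriting it, while remove() clears it whenever the other builder carries
+// any alignment at all:
+//   AttrBuilder A, B;
+//   A.addAlignmentAttr(8);
+//   B.addAlignmentAttr(16);
+//   A.merge(B);    // A's alignment stays 8
+//   A.remove(B);   // A's alignment is cleared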
+
+bool AttrBuilder::overlaps(const AttrBuilder &B) const {
+  // First check if any of the target independent attributes overlap.
+  if ((Attrs & B.Attrs).any())
+    return true;
+
+  // Then check if any target dependent ones do.
+  for (auto I : td_attrs())
+    if (B.contains(I.first))
+      return true;
+
+  return false;
+}
+
+bool AttrBuilder::contains(StringRef A) const {
+  return TargetDepAttrs.find(A) != TargetDepAttrs.end();
+}
+
+bool AttrBuilder::hasAttributes() const {
+  return !Attrs.none() || !TargetDepAttrs.empty();
+}
+
+bool AttrBuilder::hasAttributes(AttributeSet A, uint64_t Index) const {
+  unsigned Slot = ~0U;
+  for (unsigned I = 0, E = A.getNumSlots(); I != E; ++I)
+    if (A.getSlotIndex(I) == Index) {
+      Slot = I;
+      break;
+    }
+
+  assert(Slot != ~0U && "Couldn't find the index!");
+
+  for (AttributeSet::iterator I = A.begin(Slot), E = A.end(Slot); I != E; ++I) {
+    Attribute Attr = *I;
+    if (Attr.isEnumAttribute() || Attr.isIntAttribute()) {
+      if (Attrs[I->getKindAsEnum()])
+        return true;
+    } else {
+      assert(Attr.isStringAttribute() && "Invalid attribute kind!");
+      return TargetDepAttrs.find(Attr.getKindAsString())!=TargetDepAttrs.end();
+    }
+  }
+
+  return false;
+}
+
+bool AttrBuilder::hasAlignmentAttr() const {
+  return Alignment != 0;
+}
+
+bool AttrBuilder::operator==(const AttrBuilder &B) {
+  if (Attrs != B.Attrs)
+    return false;
+
+  for (td_const_iterator I = TargetDepAttrs.begin(),
+         E = TargetDepAttrs.end(); I != E; ++I)
+    if (B.TargetDepAttrs.find(I->first) == B.TargetDepAttrs.end())
+      return false;
+
+  return Alignment == B.Alignment && StackAlignment == B.StackAlignment &&
+         DerefBytes == B.DerefBytes;
+}
+
+//===----------------------------------------------------------------------===//
+// AttributeFuncs Function Definitions
+//===----------------------------------------------------------------------===//
+
+/// \brief Which attributes cannot be applied to a type.
+AttrBuilder AttributeFuncs::typeIncompatible(Type *Ty) {
+  AttrBuilder Incompatible;
+
+  if (!Ty->isIntegerTy())
+    // Attributes that only apply to integers.
+    Incompatible.addAttribute(Attribute::SExt)
+      .addAttribute(Attribute::ZExt);
+
+  if (!Ty->isPointerTy())
+    // Attributes that only apply to pointers.
+    Incompatible.addAttribute(Attribute::ByVal)
+      .addAttribute(Attribute::Nest)
+      .addAttribute(Attribute::NoAlias)
+      .addAttribute(Attribute::NoCapture)
+      .addAttribute(Attribute::NonNull)
+      .addDereferenceableAttr(1) // the int here is ignored
+      .addDereferenceableOrNullAttr(1) // the int here is ignored
+      .addAttribute(Attribute::ReadNone)
+      .addAttribute(Attribute::ReadOnly)
+      .addAttribute(Attribute::StructRet)
+      .addAttribute(Attribute::InAlloca);
+
+  return Incompatible;
+}
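+
+// Illustrative sketch (hypothetical): the verifier-style check built on top of
+// this helper is roughly
+//   AttrBuilder Incompat = AttributeFuncs::typeIncompatible(ArgTy);
+//   if (AttrBuilder(PAL, ArgIdx).overlaps(Incompat))
+//     ...reject: attribute not compatible with the argument's type...
+// i.e. an argument's attributes may not overlap the set returned here.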
+
+template<typename AttrClass>
+static bool isEqual(const Function &Caller, const Function &Callee) {
+  return Caller.getFnAttribute(AttrClass::getKind()) ==
+         Callee.getFnAttribute(AttrClass::getKind());
+}
+
+/// \brief Compute the logical AND of the attributes of the caller and the
+/// callee.
+///
+/// This function sets the caller's attribute to false if the callee's attribute
+/// is false.
+template<typename AttrClass>
+static void setAND(Function &Caller, const Function &Callee) {
+  if (AttrClass::isSet(Caller, AttrClass::getKind()) &&
+      !AttrClass::isSet(Callee, AttrClass::getKind()))
+    AttrClass::set(Caller, AttrClass::getKind(), false);
+}
+
+/// \brief Compute the logical OR of the attributes of the caller and the
+/// callee.
+///
+/// This function sets the caller's attribute to true if the callee's attribute
+/// is true.
+template<typename AttrClass>
+static void setOR(Function &Caller, const Function &Callee) {
+  if (!AttrClass::isSet(Caller, AttrClass::getKind()) &&
+      AttrClass::isSet(Callee, AttrClass::getKind()))
+    AttrClass::set(Caller, AttrClass::getKind(), true);
+}
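+
+// Illustrative sketch (hypothetical): the generated AttributesCompatFunc.inc
+// included below pairs each mergeable function attribute with one of these
+// policies, so that after inlining the caller conceptually ends up with
+//   Caller.attr = Caller.attr AND Callee.attr   // setAND-style attributes
+//   Caller.attr = Caller.attr OR  Callee.attr   // setOR-style attributes
+// where the AttrClass template parameter supplies getKind/isSet/set.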
+
+/// \brief If the inlined function had a higher stack protection level than the
+/// calling function, then bump up the caller's stack protection level.
+static void adjustCallerSSPLevel(Function &Caller, const Function &Callee) {
+  // If upgrading the SSP attribute, clear out the old SSP Attributes first.
+  // Having multiple SSP attributes doesn't actually hurt, but it adds useless
+  // clutter to the IR.
+  AttrBuilder B;
+  B.addAttribute(Attribute::StackProtect)
+    .addAttribute(Attribute::StackProtectStrong)
+    .addAttribute(Attribute::StackProtectReq);
+  AttributeSet OldSSPAttr = AttributeSet::get(Caller.getContext(),
+                                              AttributeSet::FunctionIndex,
+                                              B);
+
+  if (Callee.hasFnAttribute(Attribute::StackProtectReq)) {
+    Caller.removeAttributes(AttributeSet::FunctionIndex, OldSSPAttr);
+    Caller.addFnAttr(Attribute::StackProtectReq);
+  } else if (Callee.hasFnAttribute(Attribute::StackProtectStrong) &&
+             !Caller.hasFnAttribute(Attribute::StackProtectReq)) {
+    Caller.removeAttributes(AttributeSet::FunctionIndex, OldSSPAttr);
+    Caller.addFnAttr(Attribute::StackProtectStrong);
+  } else if (Callee.hasFnAttribute(Attribute::StackProtect) &&
+             !Caller.hasFnAttribute(Attribute::StackProtectReq) &&
+             !Caller.hasFnAttribute(Attribute::StackProtectStrong))
+    Caller.addFnAttr(Attribute::StackProtect);
+}
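+
+// Illustrative sketch (hypothetical): the effective ordering enforced above is
+//   sspreq > sspstrong > ssp
+// e.g. inlining a callee built with sspstrong into a plain-ssp caller upgrades
+// the caller to sspstrong, while a caller already carrying sspreq is left as
+// is.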
+
+#define GET_ATTR_COMPAT_FUNC
+#include "AttributesCompatFunc.inc"
+
+bool AttributeFuncs::areInlineCompatible(const Function &Caller,
+                                         const Function &Callee) {
+  return hasCompatibleFnAttrs(Caller, Callee);
+}
+
+void AttributeFuncs::mergeAttributesForInlining(Function &Caller,
+                                                const Function &Callee) {
+  mergeFnAttrs(Caller, Callee);
+}
diff --git a/llvm/tools/hpvm/llvm_patches/lib/IR/Attributes.cpp.patch b/llvm/tools/hpvm/llvm_patches/lib/IR/Attributes.cpp.patch
new file mode 100644
index 0000000000..d24aa09232
--- /dev/null
+++ b/llvm/tools/hpvm/llvm_patches/lib/IR/Attributes.cpp.patch
@@ -0,0 +1,17 @@
+--- ../../../lib/IR/Attributes.cpp	2019-12-29 18:23:36.965041833 -0600
++++ lib/IR/Attributes.cpp	2019-12-29 18:48:27.129026177 -0600
+@@ -396,6 +396,14 @@
+     return Result;
+   }
+ 
++  // VISC attributes for arguments
++  if (hasAttribute(Attribute::In))
++    return "in";
++  if (hasAttribute(Attribute::Out))
++    return "out";
++  if (hasAttribute(Attribute::InOut))
++    return "inout";
++
+   llvm_unreachable("Unknown attribute");
+ }
+ 
-- 
GitLab