xref: /freebsd/contrib/llvm-project/llvm/include/llvm/Analysis/TensorSpec.h (revision 0fca6ea1d4eea4c934cfff25ac9ee8ad6fe95583)
//===- TensorSpec.h - type descriptor for a tensor --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
9 #ifndef LLVM_ANALYSIS_TENSORSPEC_H
10 #define LLVM_ANALYSIS_TENSORSPEC_H
11 
12 #include "llvm/Config/llvm-config.h"
13 
14 #include "llvm/ADT/StringMap.h"
15 #include "llvm/IR/LLVMContext.h"
16 
17 #include <memory>
18 #include <optional>
19 #include <vector>
20 
21 namespace llvm {
22 namespace json {
23 class OStream;
24 class Value;
25 } // namespace json
26 
/// TensorSpec encapsulates the specification of a tensor: its dimensions, or
/// "shape" (row-major), its type (see TensorSpec::getDataType specializations
/// for supported types), its name and port (see "TensorFlow: Large-Scale
/// Machine Learning on Heterogeneous Distributed Systems", section 4.2, para 2:
/// https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/45166.pdf)
///
/// Note that the design is motivated by Tensorflow, but it is not intended to
/// be Tensorflow-specific.
///
/// Known tensor types. The left part is the C type, the
/// right is a name we can use to identify the type (to implement TensorSpec
/// equality checks), and to use, if needed, when mapping to an underlying
/// evaluator's type system. The main requirement is that the C type we use has
/// the same size and encoding (e.g. endian-ness) as the one used by the
/// evaluator.
#define SUPPORTED_TENSOR_TYPES(M)                                              \
  M(float, Float)                                                              \
  M(double, Double)                                                            \
  M(int8_t, Int8)                                                              \
  M(uint8_t, UInt8)                                                            \
  M(int16_t, Int16)                                                            \
  M(uint16_t, UInt16)                                                          \
  M(int32_t, Int32)                                                            \
  M(uint32_t, UInt32)                                                          \
  M(int64_t, Int64)                                                            \
  M(uint64_t, UInt64)

/// Tag for each supported element type. `Invalid` (value 0) marks an
/// uninitialized/unsupported type; one enumerator is generated per entry of
/// SUPPORTED_TENSOR_TYPES, in order; `Total` is a count sentinel, not a type.
enum class TensorType {
  Invalid,
#define _TENSOR_TYPE_ENUM_MEMBERS(_, Name) Name,
  SUPPORTED_TENSOR_TYPES(_TENSOR_TYPE_ENUM_MEMBERS)
#undef _TENSOR_TYPE_ENUM_MEMBERS
      Total
};
61 
62 class TensorSpec final {
63 public:
64   template <typename T>
65   static TensorSpec createSpec(const std::string &Name,
66                                const std::vector<int64_t> &Shape,
67                                int Port = 0) {
68     return TensorSpec(Name, Port, getDataType<T>(), sizeof(T), Shape);
69   }
70 
name()71   const std::string &name() const { return Name; }
port()72   int port() const { return Port; }
type()73   TensorType type() const { return Type; }
shape()74   const std::vector<int64_t> &shape() const { return Shape; }
75 
76   bool operator==(const TensorSpec &Other) const {
77     return Name == Other.Name && Port == Other.Port && Type == Other.Type &&
78            Shape == Other.Shape;
79   }
80 
81   bool operator!=(const TensorSpec &Other) const { return !(*this == Other); }
82 
83   /// Get the number of elements in a tensor with this shape.
getElementCount()84   size_t getElementCount() const { return ElementCount; }
85   /// Get the size, in bytes, of one element.
getElementByteSize()86   size_t getElementByteSize() const { return ElementSize; }
87   /// Get the total size of a memory buffer needed to store the whole tensor.
getTotalTensorBufferSize()88   size_t getTotalTensorBufferSize() const { return ElementCount * ElementSize; }
89 
isElementType()90   template <typename T> bool isElementType() const {
91     return getDataType<T>() == Type;
92   }
93 
TensorSpec(const std::string & NewName,const TensorSpec & Other)94   TensorSpec(const std::string &NewName, const TensorSpec &Other)
95       : TensorSpec(NewName, Other.Port, Other.Type, Other.ElementSize,
96                    Other.Shape) {}
97 
98   void toJSON(json::OStream &OS) const;
99 
100 private:
101   TensorSpec(const std::string &Name, int Port, TensorType Type,
102              size_t ElementSize, const std::vector<int64_t> &Shape);
103 
104   template <typename T> static TensorType getDataType();
105 
106   std::string Name;
107   int Port = 0;
108   TensorType Type = TensorType::Invalid;
109   std::vector<int64_t> Shape;
110   size_t ElementCount = 0;
111   size_t ElementSize = 0;
112 };
113 
114 /// For debugging.
115 std::string tensorValueToString(const char *Buffer, const TensorSpec &Spec);
116 
117 /// Construct a TensorSpec from a JSON dictionary of the form:
118 /// { "name": <string>,
119 ///   "port": <int>,
120 ///   "type": <string. Use LLVM's types, e.g. float, double, int64_t>,
121 ///   "shape": <array of ints> }
122 /// For the "type" field, see the C++ primitive types used in
123 /// TFUTILS_SUPPORTED_TYPES.
124 std::optional<TensorSpec> getTensorSpecFromJSON(LLVMContext &Ctx,
125                                                 const json::Value &Value);
126 
127 #define TFUTILS_GETDATATYPE_DEF(T, Name)                                       \
128   template <> TensorType TensorSpec::getDataType<T>();
129 SUPPORTED_TENSOR_TYPES(TFUTILS_GETDATATYPE_DEF)
130 
131 #undef TFUTILS_GETDATATYPE_DEF
132 } // namespace llvm
133 
134 #endif // LLVM_ANALYSIS_TENSORSPEC_H
135