//===- InteractiveModelRunner.cpp - Interactive ML model runner -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// A runner that communicates with an external agent via 2 file descriptors.
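// The compiler sends observations to the external host over the outbound
// channel using the training log format and, after each observation, reads
// the advice tensor back over the inbound channel. Ideally both channels are
// named pipes.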
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/InteractiveModelRunner.h"
#include "llvm/Analysis/MLModelRunner.h"
#include "llvm/Analysis/TensorSpec.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

static cl::opt<bool> DebugReply(
    "interactive-model-runner-echo-reply", cl::init(false), cl::Hidden,
    cl::desc("The InteractiveModelRunner will echo back to stderr "
             "the data received from the host (for debugging purposes)."));

InteractiveModelRunner::InteractiveModelRunner(
    LLVMContext &Ctx, const std::vector<TensorSpec> &Inputs,
    const TensorSpec &Advice, StringRef OutboundName, StringRef InboundName)
    : MLModelRunner(Ctx, MLModelRunner::Kind::Interactive, Inputs.size()),
      InputSpecs(Inputs), OutputSpec(Advice),
      InEC(sys::fs::openFileForRead(InboundName, Inbound)),
      OutputBuffer(OutputSpec.getTotalTensorBufferSize()) {
  if (InEC) {
    Ctx.emitError("Cannot open inbound file: " + InEC.message());
    return;
  }
  {
    auto OutStream = std::make_unique<raw_fd_ostream>(OutboundName, OutEC);
    if (OutEC) {
      Ctx.emitError("Cannot open outbound file: " + OutEC.message());
      return;
    }
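    // Interactive mode logs no rewards (IncludeReward is false); Advice is
    // passed both where the Logger expects a reward spec (as a placeholder)
    // and as the advice spec.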
    Log = std::make_unique<Logger>(std::move(OutStream), InputSpecs, Advice,
                                   /*IncludeReward=*/false, Advice);
  }
  // Just like in the no inference case, this will allocate an appropriately
  // sized buffer.
  for (size_t I = 0; I < InputSpecs.size(); ++I)
    setUpBufferForTensor(I, InputSpecs[I], nullptr);
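  // The Logger wrote the log header when it was constructed; flush so the
  // host can read the specs before the first observation arrives.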
  Log->flush();
}
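
// Only the inbound file needs explicit closing here; the outbound stream is
// owned (and closed) by the Logger.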
InteractiveModelRunner::~InteractiveModelRunner() {
  sys::fs::file_t FDAsOSHandle = sys::fs::convertFDToNativeFile(Inbound);
  sys::fs::closeFile(FDAsOSHandle);
}

void *InteractiveModelRunner::evaluateUntyped() {
  Log->startObservation();
  for (size_t I = 0; I < InputSpecs.size(); ++I)
    Log->logTensorValue(I, reinterpret_cast<const char *>(getTensorUntyped(I)));
  Log->endObservation();
  Log->flush();
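
  // The advice arrives as the raw bytes of the advice tensor; keep reading
  // until the buffer is full, since a pipe may deliver the reply in pieces.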
  size_t InsPoint = 0;
  char *Buff = OutputBuffer.data();
  const size_t Limit = OutputBuffer.size();
  while (InsPoint < Limit) {
    auto ReadOrErr = ::sys::fs::readNativeFile(
        sys::fs::convertFDToNativeFile(Inbound),
        {Buff + InsPoint, Limit - InsPoint});
    if (Error E = ReadOrErr.takeError()) {
      // Consume the error so it is marked handled, and report its message.
      Ctx.emitError("Failed reading from inbound file: " +
                    toString(std::move(E)));
      break;
    }
    // A zero-byte read means the other side closed the channel; bail out
    // rather than spin forever on an incomplete advice.
    if (*ReadOrErr == 0) {
      Ctx.emitError("Inbound file closed before a full advice was read");
      break;
    }
    InsPoint += *ReadOrErr;
  }
  if (DebugReply)
    dbgs() << OutputSpec.name() << ": "
           << tensorValueToString(OutputBuffer.data(), OutputSpec) << "\n";
  return OutputBuffer.data();
}