//===- InteractiveModelRunner.cpp - ML model runner over channels ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// A runner that communicates with an external agent via 2 file descriptors.
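//
// Per evaluateUntyped() call, the runner logs one observation (all input
// tensors, in the training logger wire format) to the outbound channel,
// flushes, then blocks until the host replies on the inbound channel with
// exactly OutputSpec.getTotalTensorBufferSize() bytes of raw advice data;
// the correctness of those bytes is entirely the host's responsibility.
//
// A hypothetical wiring, assuming the two channels are named pipes that the
// host creates and services before the compiler starts (names illustrative):
//
//   mkfifo /tmp/obs /tmp/advice
//   # compiler: OutboundName=/tmp/obs, InboundName=/tmp/advice
//   # host: reads observations from /tmp/obs and writes one advice tensor
//   #       to /tmp/advice per observation
//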
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/InteractiveModelRunner.h"
#include "llvm/Analysis/MLModelRunner.h"
#include "llvm/Analysis/TensorSpec.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

static cl::opt<bool> DebugReply(
    "interactive-model-runner-echo-reply", cl::init(false), cl::Hidden,
    cl::desc("The InteractiveModelRunner will echo back to stderr "
             "the data received from the host (for debugging purposes)."));

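// Note on channel setup: the constructor opens the inbound channel for
// reading before it opens the outbound one for writing. Assuming blocking
// named pipes, the host must mirror this order (open the compiler's inbound
// pipe for writing before opening the compiler's outbound pipe for reading),
// otherwise both processes block in open(2). That is a property of FIFO
// semantics, not something enforced here.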
InteractiveModelRunner::InteractiveModelRunner(
    LLVMContext &Ctx, const std::vector<TensorSpec> &Inputs,
    const TensorSpec &Advice, StringRef OutboundName, StringRef InboundName)
    : MLModelRunner(Ctx, MLModelRunner::Kind::Interactive, Inputs.size()),
      InputSpecs(Inputs), OutputSpec(Advice),
      InEC(sys::fs::openFileForRead(InboundName, Inbound)),
      OutputBuffer(OutputSpec.getTotalTensorBufferSize()) {
  if (InEC) {
    Ctx.emitError("Cannot open inbound file: " + InEC.message());
    return;
  }
  {
    auto OutStream = std::make_unique<raw_fd_ostream>(OutboundName, OutEC);
    if (OutEC) {
      Ctx.emitError("Cannot open outbound file: " + OutEC.message());
      return;
    }
    Log = std::make_unique<Logger>(std::move(OutStream), InputSpecs, Advice,
                                   /*IncludeReward=*/false, Advice);
  }
  // Just like in the no-inference case (NoInferenceModelRunner), this will
  // allocate an appropriately sized buffer for each input tensor.
  for (size_t I = 0; I < InputSpecs.size(); ++I)
    setUpBufferForTensor(I, InputSpecs[I], nullptr);
  Log->flush();
}

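// Only the inbound descriptor needs closing here: the outbound stream is
// owned by the Logger, which closes it on destruction.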
InteractiveModelRunner::~InteractiveModelRunner() {
  sys::fs::file_t FDAsOSHandle = sys::fs::convertFDToNativeFile(Inbound);
  sys::fs::closeFile(FDAsOSHandle);
}

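// Logs one observation, blocks until the host's reply arrives, and returns a
// pointer into OutputBuffer; the buffer is owned by the runner and is
// overwritten by the next evaluation.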
void *InteractiveModelRunner::evaluateUntyped() {
  Log->startObservation();
  for (size_t I = 0; I < InputSpecs.size(); ++I)
    Log->logTensorValue(I, reinterpret_cast<const char *>(getTensorUntyped(I)));
  Log->endObservation();
  Log->flush();

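  // Reads from a pipe can return fewer bytes than requested, so accumulate
  // until the whole advice tensor has been received.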
  size_t InsPoint = 0;
  char *Buff = OutputBuffer.data();
  const size_t Limit = OutputBuffer.size();
  while (InsPoint < Limit) {
    auto ReadOrErr = ::sys::fs::readNativeFile(
        sys::fs::convertFDToNativeFile(Inbound),
        {Buff + InsPoint, OutputBuffer.size() - InsPoint});
    if (ReadOrErr.takeError()) {
      Ctx.emitError("Failed reading from inbound file");
      break;
    }
    InsPoint += *ReadOrErr;
  }
  if (DebugReply)
    dbgs() << OutputSpec.name() << ": "
           << tensorValueToString(OutputBuffer.data(), OutputSpec) << "\n";
  return OutputBuffer.data();
}