//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Stmt nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CodeGenPGO.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/SaveAndRestore.h"
#include <optional>

using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                              Statement Emission
//===----------------------------------------------------------------------===//

namespace llvm {
extern cl::opt<bool> EnableSingleByteCoverage;
} // namespace llvm

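/// EmitStopPoint - Emit a debug stop point for the given statement, updating
/// the current debug location to the statement's beginning location.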
void CodeGenFunction::EmitStopPoint(const Stmt *S) {
  if (CGDebugInfo *DI = getDebugInfo()) {
    SourceLocation Loc;
    Loc = S->getBeginLoc();
    DI->EmitLocation(Builder, Loc);

    LastStopPoint = Loc;
  }
}

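/// EmitStmt - Emit the code for the statement \arg S. It is legal to call
/// this function even if there is no current insertion point.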
void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
  assert(S && "Null statement?");
  PGO->setCurrentStmt(S);

  // These statements have their own debug info handling.
  if (EmitSimpleStmt(S, Attrs))
    return;

  // Check if we are generating unreachable code.
  if (!HaveInsertPoint()) {
    // If so, and the statement doesn't contain a label, then we do not need to
    // generate actual code. This is safe because (1) the current point is
    // unreachable, so we don't need to execute the code, and (2) we've already
    // handled the statements which update internal data structures (like the
    // local variable map) which could be used by subsequent statements.
    if (!ContainsLabel(S)) {
      // Verify that any decl statements were handled as simple, since they may
      // be in scope of subsequent reachable statements.
      assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
      PGO->markStmtMaybeUsed(S);
      return;
    }

    // Otherwise, make a new block to hold the code.
    EnsureInsertPoint();
  }

  // Generate a stoppoint if we are emitting debug info.
  EmitStopPoint(S);

  // Ignore all OpenMP directives except for simd if OpenMP with Simd is
  // enabled.
  if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
    if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
      EmitSimpleOMPExecutableDirective(*D);
      return;
    }
  }

  switch (S->getStmtClass()) {
  case Stmt::NoStmtClass:
  case Stmt::CXXCatchStmtClass:
  case Stmt::SEHExceptStmtClass:
  case Stmt::SEHFinallyStmtClass:
  case Stmt::MSDependentExistsStmtClass:
    llvm_unreachable("invalid statement class to emit generically");
  case Stmt::NullStmtClass:
  case Stmt::CompoundStmtClass:
  case Stmt::DeclStmtClass:
  case Stmt::LabelStmtClass:
  case Stmt::AttributedStmtClass:
  case Stmt::GotoStmtClass:
  case Stmt::BreakStmtClass:
  case Stmt::ContinueStmtClass:
  case Stmt::DefaultStmtClass:
  case Stmt::CaseStmtClass:
  case Stmt::SEHLeaveStmtClass:
  case Stmt::SYCLKernelCallStmtClass:
    llvm_unreachable("should have emitted these statements as simple");

#define STMT(Type, Base)
#define ABSTRACT_STMT(Op)
#define EXPR(Type, Base) \
  case Stmt::Type##Class:
#include "clang/AST/StmtNodes.inc"
  {
    // Remember the block we came in on.
    llvm::BasicBlock *incoming = Builder.GetInsertBlock();
    assert(incoming && "expression emission must have an insertion point");

    EmitIgnoredExpr(cast<Expr>(S));

    llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
    assert(outgoing && "expression emission cleared block!");

    // The expression emitters assume (reasonably!) that the insertion
    // point is always set.  To maintain that, the call-emission code
    // for noreturn functions has to enter a new block with no
    // predecessors.  We want to kill that block and mark the current
    // insertion point unreachable in the common case of a call like
    // "exit();".  Since expression emission doesn't otherwise create
    // blocks with no predecessors, we can just test for that.
    // However, we must be careful not to do this to our incoming
    // block, because *statement* emission does sometimes create
    // reachable blocks which will have no predecessors until later in
    // the function.  This occurs with, e.g., labels that are not
    // reachable by fallthrough.
    if (incoming != outgoing && outgoing->use_empty()) {
      outgoing->eraseFromParent();
      Builder.ClearInsertionPoint();
    }
    break;
  }

  case Stmt::IndirectGotoStmtClass:
    EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;

  case Stmt::IfStmtClass: EmitIfStmt(cast<IfStmt>(*S)); break;
  case Stmt::WhileStmtClass: EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
  case Stmt::DoStmtClass: EmitDoStmt(cast<DoStmt>(*S), Attrs); break;
  case Stmt::ForStmtClass: EmitForStmt(cast<ForStmt>(*S), Attrs); break;

  case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S)); break;

  case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
  case Stmt::GCCAsmStmtClass: // Intentional fall-through.
  case Stmt::MSAsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break;
  case Stmt::CoroutineBodyStmtClass:
    EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
    break;
  case Stmt::CoreturnStmtClass:
    EmitCoreturnStmt(cast<CoreturnStmt>(*S));
    break;
  case Stmt::CapturedStmtClass: {
    const CapturedStmt *CS = cast<CapturedStmt>(S);
    EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
  }
  break;
  case Stmt::ObjCAtTryStmtClass:
    EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
    break;
  case Stmt::ObjCAtCatchStmtClass:
    llvm_unreachable(
        "@catch statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtFinallyStmtClass:
    llvm_unreachable(
        "@finally statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtThrowStmtClass:
    EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
    break;
  case Stmt::ObjCAtSynchronizedStmtClass:
    EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
    break;
  case Stmt::ObjCForCollectionStmtClass:
    EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
    break;
  case Stmt::ObjCAutoreleasePoolStmtClass:
    EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
    break;

  case Stmt::CXXTryStmtClass:
    EmitCXXTryStmt(cast<CXXTryStmt>(*S));
    break;
  case Stmt::CXXForRangeStmtClass:
    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs);
    break;
  case Stmt::SEHTryStmtClass:
    EmitSEHTryStmt(cast<SEHTryStmt>(*S));
    break;
  case Stmt::OMPMetaDirectiveClass:
    EmitOMPMetaDirective(cast<OMPMetaDirective>(*S));
    break;
  case Stmt::OMPCanonicalLoopClass:
    EmitOMPCanonicalLoop(cast<OMPCanonicalLoop>(S));
    break;
  case Stmt::OMPParallelDirectiveClass:
    EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
    break;
  case Stmt::OMPSimdDirectiveClass:
    EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
    break;
  case Stmt::OMPTileDirectiveClass:
    EmitOMPTileDirective(cast<OMPTileDirective>(*S));
    break;
  case Stmt::OMPStripeDirectiveClass:
    EmitOMPStripeDirective(cast<OMPStripeDirective>(*S));
    break;
  case Stmt::OMPUnrollDirectiveClass:
    EmitOMPUnrollDirective(cast<OMPUnrollDirective>(*S));
    break;
  case Stmt::OMPReverseDirectiveClass:
    EmitOMPReverseDirective(cast<OMPReverseDirective>(*S));
    break;
  case Stmt::OMPInterchangeDirectiveClass:
    EmitOMPInterchangeDirective(cast<OMPInterchangeDirective>(*S));
    break;
  case Stmt::OMPForDirectiveClass:
    EmitOMPForDirective(cast<OMPForDirective>(*S));
    break;
  case Stmt::OMPForSimdDirectiveClass:
    EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
    break;
  case Stmt::OMPSectionsDirectiveClass:
    EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
    break;
  case Stmt::OMPSectionDirectiveClass:
    EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
    break;
  case Stmt::OMPSingleDirectiveClass:
    EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
    break;
  case Stmt::OMPMasterDirectiveClass:
    EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
    break;
  case Stmt::OMPCriticalDirectiveClass:
    EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
    break;
  case Stmt::OMPParallelForDirectiveClass:
    EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
    break;
  case Stmt::OMPParallelForSimdDirectiveClass:
    EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterDirectiveClass:
    EmitOMPParallelMasterDirective(cast<OMPParallelMasterDirective>(*S));
    break;
  case Stmt::OMPParallelSectionsDirectiveClass:
    EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
    break;
  case Stmt::OMPTaskDirectiveClass:
    EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
    break;
  case Stmt::OMPTaskyieldDirectiveClass:
    EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
    break;
  case Stmt::OMPErrorDirectiveClass:
    EmitOMPErrorDirective(cast<OMPErrorDirective>(*S));
    break;
  case Stmt::OMPBarrierDirectiveClass:
    EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
    break;
  case Stmt::OMPTaskwaitDirectiveClass:
    EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
    break;
  case Stmt::OMPTaskgroupDirectiveClass:
    EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
    break;
  case Stmt::OMPFlushDirectiveClass:
    EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
    break;
  case Stmt::OMPDepobjDirectiveClass:
    EmitOMPDepobjDirective(cast<OMPDepobjDirective>(*S));
    break;
  case Stmt::OMPScanDirectiveClass:
    EmitOMPScanDirective(cast<OMPScanDirective>(*S));
    break;
  case Stmt::OMPOrderedDirectiveClass:
    EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
    break;
  case Stmt::OMPAtomicDirectiveClass:
    EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
    break;
  case Stmt::OMPTargetDirectiveClass:
    EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
    break;
  case Stmt::OMPTeamsDirectiveClass:
    EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
    break;
  case Stmt::OMPCancellationPointDirectiveClass:
    EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
    break;
  case Stmt::OMPCancelDirectiveClass:
    EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
    break;
  case Stmt::OMPTargetDataDirectiveClass:
    EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
    break;
  case Stmt::OMPTargetEnterDataDirectiveClass:
    EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
    break;
  case Stmt::OMPTargetExitDataDirectiveClass:
    EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
    break;
  case Stmt::OMPTargetParallelDirectiveClass:
    EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForDirectiveClass:
    EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
    break;
  case Stmt::OMPTaskLoopDirectiveClass:
    EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
    break;
  case Stmt::OMPTaskLoopSimdDirectiveClass:
    EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopDirectiveClass:
    EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPMaskedTaskLoopDirectiveClass:
    EmitOMPMaskedTaskLoopDirective(cast<OMPMaskedTaskLoopDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
    EmitOMPMasterTaskLoopSimdDirective(
        cast<OMPMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
    EmitOMPMaskedTaskLoopSimdDirective(
        cast<OMPMaskedTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
    EmitOMPParallelMasterTaskLoopDirective(
        cast<OMPParallelMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
    EmitOMPParallelMaskedTaskLoopDirective(
        cast<OMPParallelMaskedTaskLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
    EmitOMPParallelMasterTaskLoopSimdDirective(
        cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
    EmitOMPParallelMaskedTaskLoopSimdDirective(
        cast<OMPParallelMaskedTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeDirectiveClass:
    EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetUpdateDirectiveClass:
    EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForDirectiveClass:
    EmitOMPDistributeParallelForDirective(
        cast<OMPDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForSimdDirectiveClass:
    EmitOMPDistributeParallelForSimdDirective(
        cast<OMPDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeSimdDirectiveClass:
    EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForSimdDirectiveClass:
    EmitOMPTargetParallelForSimdDirective(
        cast<OMPTargetParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetSimdDirectiveClass:
    EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeDirectiveClass:
    EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeSimdDirectiveClass:
    EmitOMPTeamsDistributeSimdDirective(
        cast<OMPTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTeamsDistributeParallelForSimdDirective(
        cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
    EmitOMPTeamsDistributeParallelForDirective(
        cast<OMPTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDirectiveClass:
    EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeDirectiveClass:
    EmitOMPTargetTeamsDistributeDirective(
        cast<OMPTargetTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForDirective(
        cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForSimdDirective(
        cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeSimdDirective(
        cast<OMPTargetTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPInteropDirectiveClass:
    EmitOMPInteropDirective(cast<OMPInteropDirective>(*S));
    break;
  case Stmt::OMPDispatchDirectiveClass:
    CGM.ErrorUnsupported(S, "OpenMP dispatch directive");
    break;
  case Stmt::OMPScopeDirectiveClass:
    EmitOMPScopeDirective(cast<OMPScopeDirective>(*S));
    break;
  case Stmt::OMPMaskedDirectiveClass:
    EmitOMPMaskedDirective(cast<OMPMaskedDirective>(*S));
    break;
  case Stmt::OMPGenericLoopDirectiveClass:
    EmitOMPGenericLoopDirective(cast<OMPGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTeamsGenericLoopDirectiveClass:
    EmitOMPTeamsGenericLoopDirective(cast<OMPTeamsGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
    EmitOMPTargetTeamsGenericLoopDirective(
        cast<OMPTargetTeamsGenericLoopDirective>(*S));
    break;
  case Stmt::OMPParallelGenericLoopDirectiveClass:
    EmitOMPParallelGenericLoopDirective(
        cast<OMPParallelGenericLoopDirective>(*S));
    break;
  case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
    EmitOMPTargetParallelGenericLoopDirective(
        cast<OMPTargetParallelGenericLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMaskedDirectiveClass:
    EmitOMPParallelMaskedDirective(cast<OMPParallelMaskedDirective>(*S));
    break;
  case Stmt::OMPAssumeDirectiveClass:
    EmitOMPAssumeDirective(cast<OMPAssumeDirective>(*S));
    break;
  case Stmt::OpenACCComputeConstructClass:
    EmitOpenACCComputeConstruct(cast<OpenACCComputeConstruct>(*S));
    break;
  case Stmt::OpenACCLoopConstructClass:
    EmitOpenACCLoopConstruct(cast<OpenACCLoopConstruct>(*S));
    break;
  case Stmt::OpenACCCombinedConstructClass:
    EmitOpenACCCombinedConstruct(cast<OpenACCCombinedConstruct>(*S));
    break;
  case Stmt::OpenACCDataConstructClass:
    EmitOpenACCDataConstruct(cast<OpenACCDataConstruct>(*S));
    break;
  case Stmt::OpenACCEnterDataConstructClass:
    EmitOpenACCEnterDataConstruct(cast<OpenACCEnterDataConstruct>(*S));
    break;
  case Stmt::OpenACCExitDataConstructClass:
    EmitOpenACCExitDataConstruct(cast<OpenACCExitDataConstruct>(*S));
    break;
  case Stmt::OpenACCHostDataConstructClass:
    EmitOpenACCHostDataConstruct(cast<OpenACCHostDataConstruct>(*S));
    break;
  case Stmt::OpenACCWaitConstructClass:
    EmitOpenACCWaitConstruct(cast<OpenACCWaitConstruct>(*S));
    break;
  case Stmt::OpenACCInitConstructClass:
    EmitOpenACCInitConstruct(cast<OpenACCInitConstruct>(*S));
    break;
  case Stmt::OpenACCShutdownConstructClass:
    EmitOpenACCShutdownConstruct(cast<OpenACCShutdownConstruct>(*S));
    break;
  case Stmt::OpenACCSetConstructClass:
    EmitOpenACCSetConstruct(cast<OpenACCSetConstruct>(*S));
    break;
  case Stmt::OpenACCUpdateConstructClass:
    EmitOpenACCUpdateConstruct(cast<OpenACCUpdateConstruct>(*S));
    break;
  case Stmt::OpenACCAtomicConstructClass:
    EmitOpenACCAtomicConstruct(cast<OpenACCAtomicConstruct>(*S));
    break;
  case Stmt::OpenACCCacheConstructClass:
    EmitOpenACCCacheConstruct(cast<OpenACCCacheConstruct>(*S));
    break;
  }
}

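/// EmitSimpleStmt - Try to emit a "simple" statement, i.e. one that has its
/// own debug info handling and does not require an insertion point (typically
/// a jump, label, declaration, or compound statement). Returns true if the
/// statement was handled.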
bool CodeGenFunction::EmitSimpleStmt(const Stmt *S,
                                     ArrayRef<const Attr *> Attrs) {
  switch (S->getStmtClass()) {
  default:
    return false;
  case Stmt::NullStmtClass:
    break;
  case Stmt::CompoundStmtClass:
    EmitCompoundStmt(cast<CompoundStmt>(*S));
    break;
  case Stmt::DeclStmtClass:
    EmitDeclStmt(cast<DeclStmt>(*S));
    break;
  case Stmt::LabelStmtClass:
    EmitLabelStmt(cast<LabelStmt>(*S));
    break;
  case Stmt::AttributedStmtClass:
    EmitAttributedStmt(cast<AttributedStmt>(*S));
    break;
  case Stmt::GotoStmtClass:
    EmitGotoStmt(cast<GotoStmt>(*S));
    break;
  case Stmt::BreakStmtClass:
    EmitBreakStmt(cast<BreakStmt>(*S));
    break;
  case Stmt::ContinueStmtClass:
    EmitContinueStmt(cast<ContinueStmt>(*S));
    break;
  case Stmt::DefaultStmtClass:
    EmitDefaultStmt(cast<DefaultStmt>(*S), Attrs);
    break;
  case Stmt::CaseStmtClass:
    EmitCaseStmt(cast<CaseStmt>(*S), Attrs);
    break;
  case Stmt::SEHLeaveStmtClass:
    EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S));
    break;
  case Stmt::SYCLKernelCallStmtClass:
    // SYCL kernel call statements are generated as wrappers around the body
    // of functions declared with the sycl_kernel_entry_point attribute. Such
    // functions are used to specify how a SYCL kernel (a function object) is
    // to be invoked; the SYCL kernel call statement contains a transformed
    // variation of the function body and is used to generate a SYCL kernel
    // caller function; a function that serves as the device side entry point
    // used to execute the SYCL kernel. The sycl_kernel_entry_point attributed
    // function is invoked by host code in order to trigger emission of the
    // device side SYCL kernel caller function and to generate metadata needed
    // by SYCL run-time library implementations; the function is otherwise
    // intended to have no effect. As such, the function body is not evaluated
    // as part of the invocation during host compilation (and the function
    // should not be called or emitted during device compilation); the SYCL
    // kernel call statement is thus handled as a null statement for the
    // purpose of code generation.
    break;
  }
  return true;
}

/// EmitCompoundStmt - Emit a compound statement {..} node.  If GetLast is
/// true, this captures the expression result of the last sub-statement and
/// returns it (for use by the statement expression extension).
Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
                                          AggValueSlot AggSlot) {
  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
                                S.getLBracLoc(),
                                "LLVM IR generation of compound statement ('{}')");

  // Keep track of the current cleanup stack depth, including debug scopes.
  LexicalScope Scope(*this, S.getSourceRange());

  return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
}

Address
CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
                                              bool GetLast,
                                              AggValueSlot AggSlot) {

  const Stmt *ExprResult = S.getStmtExprResult();
  assert((!GetLast || (GetLast && ExprResult)) &&
         "If GetLast is true then the CompoundStmt must have a StmtExprResult");

  Address RetAlloca = Address::invalid();

  for (auto *CurStmt : S.body()) {
    if (GetLast && ExprResult == CurStmt) {
      // We have to special case labels here. They are statements, but when put
      // at the end of a statement expression, they yield the value of their
      // subexpression. Handle this by walking through all labels we encounter,
      // emitting them before we evaluate the subexpr.
      // Similar issues arise for attributed statements.
      while (!isa<Expr>(ExprResult)) {
        if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) {
          EmitLabel(LS->getDecl());
          ExprResult = LS->getSubStmt();
        } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) {
          // FIXME: Update this if we ever have attributes that affect the
          // semantics of an expression.
          ExprResult = AS->getSubStmt();
        } else {
          llvm_unreachable("unknown value statement");
        }
      }

      EnsureInsertPoint();

      const Expr *E = cast<Expr>(ExprResult);
      QualType ExprTy = E->getType();
      if (hasAggregateEvaluationKind(ExprTy)) {
        EmitAggExpr(E, AggSlot);
      } else {
        // We can't return an RValue here because there might be cleanups at
        // the end of the StmtExpr. Because of that, we have to emit the result
        // here into a temporary alloca.
        RetAlloca = CreateMemTemp(ExprTy);
        EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
                         /*IsInit*/ false);
      }
    } else {
      EmitStmt(CurStmt);
    }
  }

  return RetAlloca;
}

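/// SimplifyForwardingBlocks - If the given basic block is only an
/// unconditional branch to another block, fold it away by replacing all uses
/// of the block with its successor and deleting the block.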
void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
  llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());

  // If there is a cleanup stack, then it isn't worth trying to
  // simplify this block (we would need to remove it from the scope map
  // and cleanup entry).
  if (!EHStack.empty())
    return;

  // Can only simplify direct branches.
  if (!BI || !BI->isUnconditional())
    return;

  // Can only simplify empty blocks.
  if (BI->getIterator() != BB->begin())
    return;

  BB->replaceAllUsesWith(BI->getSuccessor(0));
  BI->eraseFromParent();
  BB->eraseFromParent();
}

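/// EmitBlock - Emit the given block \arg BB and set it as the insert point,
/// adding a fall-through branch from the current insert block if necessary.
/// If IsFinished is true and the block has no uses, it is deleted instead of
/// being inserted.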
void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Fall out of the current block (if necessary).
  EmitBranch(BB);

  if (IsFinished && BB->use_empty()) {
    delete BB;
    return;
  }

  // Place the block after the current block, if possible, or else at
  // the end of the function.
  if (CurBB && CurBB->getParent())
    CurFn->insert(std::next(CurBB->getIterator()), BB);
  else
    CurFn->insert(CurFn->end(), BB);
  Builder.SetInsertPoint(BB);
}

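/// EmitBranch - Emit a fall-through branch from the current insert block to
/// the target block, unless the current block is already terminated or there
/// is no insertion point. Clears the insertion point in all cases.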
void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
  // Emit a branch from the current block to the target one if this
  // was a real block.  If this was just a fall-through block after a
  // terminator, don't emit it.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (!CurBB || CurBB->getTerminator()) {
    // If there is no insert point or the previous block is already
    // terminated, don't touch it.
  } else {
    // Otherwise, create a fall-through branch.
    Builder.CreateBr(Target);
  }

  Builder.ClearInsertionPoint();
}

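/// EmitBlockAfterUses - Insert the given block into the function after the
/// block containing one of its instruction users, or at the end of the
/// function if it has none, and set it as the insert point.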
void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
  bool inserted = false;
  for (llvm::User *u : block->users()) {
    if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
      CurFn->insert(std::next(insn->getParent()->getIterator()), block);
      inserted = true;
      break;
    }
  }

  if (!inserted)
    CurFn->insert(CurFn->end(), block);

  Builder.SetInsertPoint(block);
}

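/// getJumpDestForLabel - Return a jump destination for the given label,
/// creating (but not inserting) the target block if it does not exist yet.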
CodeGenFunction::JumpDest
CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
  JumpDest &Dest = LabelMap[D];
  if (Dest.isValid()) return Dest;

  // Create, but don't insert, the new block.
  Dest = JumpDest(createBasicBlock(D->getName()),
                  EHScopeStack::stable_iterator::invalid(),
                  NextCleanupDestIndex++);
  return Dest;
}

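/// EmitLabel - Emit the block for the given label, resolving any forward
/// references to it and registering it with the current lexical scope.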
void CodeGenFunction::EmitLabel(const LabelDecl *D) {
  // Add this label to the current lexical scope if we're within any
  // normal cleanups.  Jumps "in" to this label --- when permitted by
  // the language --- may need to be routed around such cleanups.
  if (EHStack.hasNormalCleanups() && CurLexicalScope)
    CurLexicalScope->addLabel(D);

  JumpDest &Dest = LabelMap[D];

  // If we didn't need a forward reference to this label, just go
  // ahead and create a destination at the current scope.
  if (!Dest.isValid()) {
    Dest = getJumpDestInCurrentScope(D->getName());

    // Otherwise, we need to give this label a target depth and remove
    // it from the branch-fixups list.
  } else {
    assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
    Dest.setScopeDepth(EHStack.stable_begin());
    ResolveBranchFixups(Dest.getBlock());
  }

  EmitBlock(Dest.getBlock());

  // Emit debug info for labels.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
      DI->setLocation(D->getLocation());
      DI->EmitLabel(D, Builder);
    }
  }

  incrementProfileCounter(D->getStmt());
}

/// Change the cleanup scope of the labels in this lexical scope to
/// match the scope of the enclosing context.
void CodeGenFunction::LexicalScope::rescopeLabels() {
  assert(!Labels.empty());
  EHScopeStack::stable_iterator innermostScope
    = CGF.EHStack.getInnermostNormalCleanup();

  // Change the scope depth of all the labels.
  for (const LabelDecl *Label : Labels) {
    assert(CGF.LabelMap.count(Label));
    JumpDest &dest = CGF.LabelMap.find(Label)->second;
    assert(dest.getScopeDepth().isValid());
    assert(innermostScope.encloses(dest.getScopeDepth()));
    dest.setScopeDepth(innermostScope);
  }

  // Reparent the labels if the new scope also has cleanups.
  if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
    ParentScope->Labels.append(Labels.begin(), Labels.end());
  }
}

void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
  EmitLabel(S.getDecl());

  // IsEHa - emit eha.scope.begin if it's a side entry of a scope
  if (getLangOpts().EHAsynch && S.isSideEntry())
    EmitSehCppScopeBegin();

  EmitStmt(S.getSubStmt());
}

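/// EmitAttributedStmt - Emit an attributed statement, recording the
/// statement-level attributes (nomerge, noinline, always_inline, noconvergent,
/// musttail, [[assume]], atomic options, and HLSL control-flow hints) before
/// emitting the sub-statement.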
void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
  bool nomerge = false;
  bool noinline = false;
  bool alwaysinline = false;
  bool noconvergent = false;
  HLSLControlFlowHintAttr::Spelling flattenOrBranch =
      HLSLControlFlowHintAttr::SpellingNotCalculated;
  const CallExpr *musttail = nullptr;
  const AtomicAttr *AA = nullptr;

  for (const auto *A : S.getAttrs()) {
    switch (A->getKind()) {
    default:
      break;
    case attr::NoMerge:
      nomerge = true;
      break;
    case attr::NoInline:
      noinline = true;
      break;
    case attr::AlwaysInline:
      alwaysinline = true;
      break;
    case attr::NoConvergent:
      noconvergent = true;
      break;
    case attr::MustTail: {
      const Stmt *Sub = S.getSubStmt();
      const ReturnStmt *R = cast<ReturnStmt>(Sub);
      musttail = cast<CallExpr>(R->getRetValue()->IgnoreParens());
    } break;
    case attr::CXXAssume: {
      const Expr *Assumption = cast<CXXAssumeAttr>(A)->getAssumption();
      if (getLangOpts().CXXAssumptions && Builder.GetInsertBlock() &&
          !Assumption->HasSideEffects(getContext())) {
        llvm::Value *AssumptionVal = EmitCheckedArgForAssume(Assumption);
        Builder.CreateAssumption(AssumptionVal);
      }
    } break;
    case attr::Atomic:
      AA = cast<AtomicAttr>(A);
      break;
    case attr::HLSLControlFlowHint: {
      flattenOrBranch = cast<HLSLControlFlowHintAttr>(A)->getSemanticSpelling();
    } break;
    }
  }
  SaveAndRestore save_nomerge(InNoMergeAttributedStmt, nomerge);
  SaveAndRestore save_noinline(InNoInlineAttributedStmt, noinline);
  SaveAndRestore save_alwaysinline(InAlwaysInlineAttributedStmt, alwaysinline);
  SaveAndRestore save_noconvergent(InNoConvergentAttributedStmt, noconvergent);
  SaveAndRestore save_musttail(MustTailCall, musttail);
  SaveAndRestore save_flattenOrBranch(HLSLControlFlowAttr, flattenOrBranch);
  CGAtomicOptionsRAII AORAII(CGM, AA);
  EmitStmt(S.getSubStmt(), S.getAttrs());
}

void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
  // If this code is reachable then emit a stop point (if generating
  // debug info).  We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
}

void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
  if (const LabelDecl *Target = S.getConstantTarget()) {
    EmitBranchThroughCleanup(getJumpDestForLabel(Target));
    return;
  }

  // Ensure that we have an i8* for our PHI node.
  llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
                                         Int8PtrTy, "addr");
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Get the basic block for the indirect goto.
  llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();

  // The first instruction in the block has to be the PHI for the switch dest,
  // add an entry for this branch.
  cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);

  EmitBranch(IndGotoBB);
}

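/// EmitIfStmt - Emit an if statement, folding away the dead arm when the
/// condition constant-folds and attaching PGO or likelihood branch weights to
/// the conditional branch otherwise.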
void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
  const Stmt *Else = S.getElse();

  // The else branch of a consteval if statement is always the only branch
  // that can be runtime evaluated.
  if (S.isConsteval()) {
    const Stmt *Executed = S.isNegatedConsteval() ? S.getThen() : Else;
    if (Executed) {
      RunCleanupsScope ExecutedScope(*this);
      EmitStmt(Executed);
    }
    return;
  }

  // C99 6.8.4.1: The first substatement is executed if the expression compares
  // unequal to 0.  The condition must be a scalar type.
  LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());
  ApplyDebugLocation DL(*this, S.getCond());

  if (S.getInit())
    EmitStmt(S.getInit());

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
  bool CondConstant;
  if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
                                   S.isConstexpr())) {
    // Figure out which block (then or else) is executed.
    const Stmt *Executed = S.getThen();
    const Stmt *Skipped = Else;
    if (!CondConstant) // Condition false?
      std::swap(Executed, Skipped);

    // If the skipped block has no labels in it, just emit the executed block.
    // This avoids emitting dead code and simplifies the CFG substantially.
    if (S.isConstexpr() || !ContainsLabel(Skipped)) {
      if (CondConstant)
        incrementProfileCounter(&S);
      if (Executed) {
        MaybeEmitDeferredVarDeclInit(S.getConditionVariable());
        RunCleanupsScope ExecutedScope(*this);
        EmitStmt(Executed);
      }
      PGO->markStmtMaybeUsed(Skipped);
      return;
    }
  }

  // Otherwise, the condition did not fold, or we couldn't elide it.  Just
  // emit the conditional branch.
  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
  llvm::BasicBlock *ElseBlock = ContBlock;
  if (Else)
    ElseBlock = createBasicBlock("if.else");

  // Prefer the PGO based weights over the likelihood attribute.
  // When the build isn't optimized the metadata isn't used, so don't generate
  // it.
  // Also, differentiate between disabled PGO and a never executed branch with
  // PGO. Assuming PGO is in use:
  // - we want to ignore the [[likely]] attribute if the branch is never
  //   executed,
  // - assuming the profile is poor, preserving the attribute may still be
  //   beneficial.
  // As an approximation, preserve the attribute only if both the branch and
  // the parent context were not executed.
  Stmt::Likelihood LH = Stmt::LH_None;
  uint64_t ThenCount = getProfileCount(S.getThen());
  if (!ThenCount && !getCurrentProfileCount() &&
      CGM.getCodeGenOpts().OptimizationLevel)
    LH = Stmt::getLikelihood(S.getThen(), Else);

  // When measuring MC/DC, always fully evaluate the condition up front using
  // EvaluateExprAsBool() so that the test vector bitmap can be updated prior
  // to executing the body of the if.then or if.else. This is useful for when
  // there is a 'return' within the body, but this is particularly beneficial
  // when one if-stmt is nested within another if-stmt so that all of the MC/DC
  // updates are kept linear and consistent.
  if (!CGM.getCodeGenOpts().MCDCCoverage) {
    EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, ThenCount, LH,
                         /*ConditionalOp=*/nullptr,
                         /*ConditionalDecl=*/S.getConditionVariable());
  } else {
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    MaybeEmitDeferredVarDeclInit(S.getConditionVariable());
    Builder.CreateCondBr(BoolCondVal, ThenBlock, ElseBlock);
  }

  // Emit the 'then' code.
  EmitBlock(ThenBlock);
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getThen());
  else
    incrementProfileCounter(&S);
  {
    RunCleanupsScope ThenScope(*this);
    EmitStmt(S.getThen());
  }
  EmitBranch(ContBlock);

  // Emit the 'else' code if present.
  if (Else) {
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBlock(ElseBlock);
    }
    // When single byte coverage mode is enabled, add a counter to else block.
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(Else);
    {
      RunCleanupsScope ElseScope(*this);
      EmitStmt(Else);
    }
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBranch(ContBlock);
    }
  }

  // Emit the continuation block for code after the if.
  EmitBlock(ContBlock, /*IsFinished=*/true);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);
}

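/// checkIfLoopMustProgress - Decide whether a loop with the given controlling
/// expression is required to make forward progress under the current language
/// mode and -ffinite-loops settings. For trivial infinite loops that C++26
/// permits, the mustprogress attribute is dropped from the function.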
bool CodeGenFunction::checkIfLoopMustProgress(const Expr *ControllingExpression,
                                              bool HasEmptyBody) {
  if (CGM.getCodeGenOpts().getFiniteLoops() ==
      CodeGenOptions::FiniteLoopsKind::Never)
    return false;

  // Now apply rules for plain C (see 6.8.5.6 in C11).
  // Loops with constant conditions do not have to make progress in any C
  // version.
  // As an extension, we consider loops whose controlling expression
  // can be constant-folded.
  Expr::EvalResult Result;
  bool CondIsConstInt =
      !ControllingExpression ||
      (ControllingExpression->EvaluateAsInt(Result, getContext()) &&
       Result.Val.isInt());

  bool CondIsTrue = CondIsConstInt && (!ControllingExpression ||
                                       Result.Val.getInt().getBoolValue());

  // Loops with non-constant conditions must make progress in C11 and later.
  if (getLangOpts().C11 && !CondIsConstInt)
    return true;

  // [C++26][intro.progress] (DR)
  // The implementation may assume that any thread will eventually do one of
  // the following:
  // [...]
  // - continue execution of a trivial infinite loop ([stmt.iter.general]).
  if (CGM.getCodeGenOpts().getFiniteLoops() ==
          CodeGenOptions::FiniteLoopsKind::Always ||
      getLangOpts().CPlusPlus11) {
    if (HasEmptyBody && CondIsTrue) {
      CurFn->removeFnAttr(llvm::Attribute::MustProgress);
      return false;
    }
    return true;
  }
  return false;
}

// [C++26][stmt.iter.general] (DR)
// A trivially empty iteration statement is an iteration statement matching
// one of the following forms:
//  - while ( expression ) ;
//  - while ( expression ) { }
//  - do ; while ( expression ) ;
//  - do { } while ( expression ) ;
//  - for ( init-statement expression(opt); ) ;
//  - for ( init-statement expression(opt); ) { }
template <typename LoopStmt> static bool hasEmptyLoopBody(const LoopStmt &S) {
  if constexpr (std::is_same_v<LoopStmt, ForStmt>) {
    if (S.getInc())
      return false;
  }
  const Stmt *Body = S.getBody();
  if (!Body || isa<NullStmt>(Body))
    return true;
  if (const CompoundStmt *Compound = dyn_cast<CompoundStmt>(Body))
    return Compound->body_empty();
  return false;
}

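/// EmitWhileStmt - Emit a while statement: a condition header block that also
/// serves as the continue target, the loop body, and an exit block that also
/// serves as the break target.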
void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
                                    ArrayRef<const Attr *> WhileAttrs) {
  // Emit the header for the loop, which will also become
  // the continue target.
  JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
  EmitBlock(LoopHeader.getBlock());

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(
        emitConvergenceLoopToken(LoopHeader.getBlock()));

  // Create an exit block for when the condition fails, which will
  // also become the break target.
  JumpDest LoopExit = getJumpDestInCurrentScope("while.end");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));

  // C++ [stmt.while]p2:
  //   When the condition of a while statement is a declaration, the
  //   scope of the variable that is declared extends from its point
  //   of declaration (3.3.2) to the end of the while statement.
  //   [...]
  //   The object created in a condition is destroyed and created
  //   with each iteration of the loop.
  RunCleanupsScope ConditionScope(*this);

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // Evaluate the conditional in the while header.  C99 6.8.5.1: The
  // evaluation of the controlling expression takes place before each
  // execution of the loop body.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  MaybeEmitDeferredVarDeclInit(S.getConditionVariable());

  // while(1) is common, avoid extra exit blocks.  Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool EmitBoolCondBranch = !C || !C->isOne();
  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
                 WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));

  // When single byte coverage mode is enabled, add a counter to loop
  // condition.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getCond());

  // As long as the condition is true, go to the loop body.
  llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
  if (EmitBoolCondBranch) {
    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    if (ConditionScope.requiresCleanups())
      ExitBlock = createBasicBlock("while.exit");
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));
    auto *I = Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock, Weights);
    // Key Instructions: Emit the condition and branch as separate source
    // location atoms otherwise we may omit a step onto the loop condition in
    // favour of the `while` keyword.
    // FIXME: We could have the branch as the backup location for the
    // condition, which would probably be a better experience. Explore this
    // later.
    if (auto *CondI = dyn_cast<llvm::Instruction>(BoolCondVal))
      addInstToNewSourceAtom(CondI, nullptr);
    addInstToNewSourceAtom(I, nullptr);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }
  } else if (const Attr *A = Stmt::getLikelihoodAttr(S.getBody())) {
    CGM.getDiags().Report(A->getLocation(),
                          diag::warn_attribute_has_no_effect_on_infinite_loop)
        << A << A->getRange();
    CGM.getDiags().Report(
        S.getWhileLoc(),
        diag::note_attribute_has_no_effect_on_infinite_loop_here)
        << SourceRange(S.getWhileLoc(), S.getRParenLoc());
  }

  // Emit the loop body.  We have to emit this in a cleanup scope
  // because it might be a singleton DeclStmt.
  {
    RunCleanupsScope BodyScope(*this);
    EmitBlock(LoopBody);
    // When single byte coverage mode is enabled, add a counter to the body.
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(S.getBody());
    else
      incrementProfileCounter(&S);
    EmitStmt(S.getBody());
  }

  BreakContinueStack.pop_back();

  // Immediately force cleanup.
  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  // Branch to the loop header again.
  EmitBranch(LoopHeader.getBlock());

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock(), /*IsFinished=*/true);

  // The LoopHeader typically is just a branch if we skipped emitting
  // a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopHeader.getBlock());

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();
}

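/// EmitDoStmt - Emit a do/while statement: the loop body first, then the
/// condition block (the continue target), branching back to the body while
/// the condition is true.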
void CodeGenFunction::EmitDoStmt(const DoStmt &S,
                                 ArrayRef<const Attr *> DoAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");

  uint64_t ParentCount = getCurrentProfileCount();

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));

  // Emit the body of the loop.
  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");

  if (llvm::EnableSingleByteCoverage)
    EmitBlockWithFallThrough(LoopBody, S.getBody());
  else
    EmitBlockWithFallThrough(LoopBody, &S);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(emitConvergenceLoopToken(LoopBody));

  {
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  EmitBlock(LoopCond.getBlock());
  // When single byte coverage mode is enabled, add a counter to loop
  // condition.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getCond());

  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
  // after each execution of the loop body."

  // Evaluate the conditional in the while header.
  // C99 6.8.5p2/p4: The first substatement is executed if the expression
  // compares unequal to 0.  The condition must be a scalar type.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  BreakContinueStack.pop_back();

  // "do {} while (0)" is common in macros, avoid extra blocks.  Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool EmitBoolCondBranch = !C || !C->isZero();

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));

  // As long as the condition is true, iterate the loop.
  if (EmitBoolCondBranch) {
    uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
    auto *I = Builder.CreateCondBr(
        BoolCondVal, LoopBody, LoopExit.getBlock(),
        createProfileWeightsForLoop(S.getCond(), BackedgeCount));

    // Key Instructions: Emit the condition and branch as separate source
    // location atoms otherwise we may omit a step onto the loop condition in
    // favour of the closing brace.
    // FIXME: We could have the branch as the backup location for the
    // condition, which would probably be a better experience (no jumping to
    // the brace).
    if (auto *CondI = dyn_cast<llvm::Instruction>(BoolCondVal))
      addInstToNewSourceAtom(CondI, nullptr);
    addInstToNewSourceAtom(I, nullptr);
  }

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock());

  // The DoCond block typically is just a branch if we skipped
  // emitting a branch, try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopCond.getBlock());

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();
}

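/// EmitForStmt - Emit a for statement: the optional init statement, a
/// condition block, the body, and the optional increment block (the continue
/// target when present).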
void CodeGenFunction::EmitForStmt(const ForStmt &S,
                                  ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first part before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  JumpDest CondDest = getJumpDestInCurrentScope("for.cond");
  llvm::BasicBlock *CondBlock = CondDest.getBlock();
  EmitBlock(CondBlock);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.push_back(emitConvergenceLoopToken(CondBlock));

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(S.getCond(), hasEmptyLoopBody(S)));

  // Create a cleanup scope for the condition variable cleanups.
  LexicalScope ConditionScope(*this, S.getSourceRange());

  // If the for loop doesn't have an increment we can just use the condition as
  // the continue block.  Otherwise, if there is no condition variable, we can
  // form the continue block now.  If there is a condition variable, we can't
  // form the continue block until after we've emitted the condition, because
  // the condition is in scope in the increment, but Sema's jump diagnostics
  // ensure that there are no continues from the condition variable that jump
  // to the loop increment.
  JumpDest Continue;
  if (!S.getInc())
    Continue = CondDest;
  else if (!S.getConditionVariable())
    Continue = getJumpDestInCurrentScope("for.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  if (S.getCond()) {
    // If the for statement has a condition scope, emit the local variable
    // declaration.
    if (S.getConditionVariable()) {
      EmitDecl(*S.getConditionVariable());

      // We have entered the condition variable's scope, so we're now able to
      // jump to the continue block.
      Continue = S.getInc() ? getJumpDestInCurrentScope("for.inc") : CondDest;
      BreakContinueStack.back().ContinueBlock = Continue;
    }

    // When single byte coverage mode is enabled, add a counter to loop
    // condition.
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(S.getCond());

    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    // If there are any cleanups between here and the loop-exit scope,
    // create a block to stage a loop exit along.
    if (ForScope.requiresCleanups())
      ExitBlock = createBasicBlock("for.cond.cleanup");

    // As long as the condition is true, iterate the loop.
    llvm::BasicBlock *ForBody = createBasicBlock("for.body");

    // C99 6.8.5p2/p4: The first substatement is executed if the expression
    // compares unequal to 0.  The condition must be a scalar type.
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

    MaybeEmitDeferredVarDeclInit(S.getConditionVariable());

    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));

    auto *I = Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);
    // Key Instructions: Emit the condition and branch as separate atoms to
    // match existing loop stepping behaviour. FIXME: We could have the branch
    // as the backup location for the condition, which would probably be a
    // better experience (no jumping to the brace).
    if (auto *CondI = dyn_cast<llvm::Instruction>(BoolCondVal))
      addInstToNewSourceAtom(CondI, nullptr);
    addInstToNewSourceAtom(I, nullptr);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }

    EmitBlock(ForBody);
  } else {
    // Treat it as a non-zero constant.  Don't even create a new block for the
    // body, just fall into it.
  }

  // When single byte coverage mode is enabled, add a counter to the body.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(S.getBody());
  else
    incrementProfileCounter(&S);
  {
    // Create a separate cleanup scope for the body, in case it is not
    // a compound statement.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  // The last block in the loop's body (which unconditionally branches to the
  // `inc` block if there is one).
  auto *FinalBodyBB = Builder.GetInsertBlock();

  // If there is an increment, emit it next.
  if (S.getInc()) {
    EmitBlock(Continue.getBlock());
    EmitStmt(S.getInc());
    if (llvm::EnableSingleByteCoverage)
      incrementProfileCounter(S.getInc());
  }

  BreakContinueStack.pop_back();

  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), /*IsFinished=*/true);

  // When single byte coverage mode is enabled, add a counter to continuation
  // block.
  if (llvm::EnableSingleByteCoverage)
    incrementProfileCounter(&S);

  if (CGM.shouldEmitConvergenceTokens())
    ConvergenceTokenStack.pop_back();

  if (FinalBodyBB) {
    // Key Instructions: We want the for closing brace to be step-able on to
    // match existing behaviour.
    addInstToNewSourceAtom(FinalBodyBB->getTerminator(), nullptr);
  }
}

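/// EmitCXXForRangeStmt - Emit a C++11 range-based for statement, which Sema
/// has already desugared into range, begin, and end statements, a condition,
/// a loop-variable declaration, and an increment.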
1440void
1441CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
1442 ArrayRef<const Attr *> ForAttrs) {
1443 JumpDest LoopExit = getJumpDestInCurrentScope(Name: "for.end");
1444
1445 LexicalScope ForScope(*this, S.getSourceRange());
1446
1447 // Evaluate the first pieces before the loop.
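  // Illustrative sketch (not from the original source): a range-based for such
  // as "for (auto x : vec) use(x);" is emitted roughly as if written
  //   auto &&__range = vec;
  //   auto __begin = begin-expr(__range), __end = end-expr(__range);
  //   for (; __begin != __end; ++__begin) { auto x = *__begin; use(x); }
  // which is why the range, begin, and end statements are evaluated up front
  // here, before the condition block is created.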
1448 if (S.getInit())
1449 EmitStmt(S: S.getInit());
1450 EmitStmt(S: S.getRangeStmt());
1451 EmitStmt(S: S.getBeginStmt());
1452 EmitStmt(S: S.getEndStmt());
1453
1454 // Start the loop with a block that tests the condition.
1455 // If there's an increment, the continue scope will be overwritten
1456 // later.
1457 llvm::BasicBlock *CondBlock = createBasicBlock(name: "for.cond");
1458 EmitBlock(BB: CondBlock);
1459
1460 if (CGM.shouldEmitConvergenceTokens())
1461 ConvergenceTokenStack.push_back(Elt: emitConvergenceLoopToken(BB: CondBlock));
1462
1463 const SourceRange &R = S.getSourceRange();
1464 LoopStack.push(Header: CondBlock, Ctx&: CGM.getContext(), CGOpts: CGM.getCodeGenOpts(), Attrs: ForAttrs,
1465 StartLoc: SourceLocToDebugLoc(Location: R.getBegin()),
1466 EndLoc: SourceLocToDebugLoc(Location: R.getEnd()));
1467
1468 // If there are any cleanups between here and the loop-exit scope,
1469 // create a block to stage a loop exit along.
1470 llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
1471 if (ForScope.requiresCleanups())
1472 ExitBlock = createBasicBlock(name: "for.cond.cleanup");
1473
1474 // The loop body, consisting of the specified body and the loop variable.
1475 llvm::BasicBlock *ForBody = createBasicBlock(name: "for.body");
1476
1477 // The body is executed if the expression, contextually converted
1478 // to bool, is true.
1479 llvm::Value *BoolCondVal = EvaluateExprAsBool(E: S.getCond());
1480 llvm::MDNode *Weights =
1481 createProfileWeightsForLoop(Cond: S.getCond(), LoopCount: getProfileCount(S: S.getBody()));
1482 if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
1483 BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
1484 Cond: BoolCondVal, LH: Stmt::getLikelihood(S: S.getBody()));
1485 auto *I = Builder.CreateCondBr(Cond: BoolCondVal, True: ForBody, False: ExitBlock, BranchWeights: Weights);
1486 // Key Instructions: Emit the condition and branch as separate atoms to
1487 // match existing loop stepping behaviour. FIXME: We could have the branch as
1488 // the backup location for the condition, which would probably be a better
1489 // experience.
1490 if (auto *CondI = dyn_cast<llvm::Instruction>(Val: BoolCondVal))
1491 addInstToNewSourceAtom(KeyInstruction: CondI, Backup: nullptr);
1492 addInstToNewSourceAtom(KeyInstruction: I, Backup: nullptr);
1493
1494 if (ExitBlock != LoopExit.getBlock()) {
1495 EmitBlock(BB: ExitBlock);
1496 EmitBranchThroughCleanup(Dest: LoopExit);
1497 }
1498
1499 EmitBlock(BB: ForBody);
1500 if (llvm::EnableSingleByteCoverage)
1501 incrementProfileCounter(S: S.getBody());
1502 else
1503 incrementProfileCounter(S: &S);
1504
1505 // Create a block for the increment. In case of a 'continue', we jump there.
1506 JumpDest Continue = getJumpDestInCurrentScope(Name: "for.inc");
1507
1508 // Store the blocks to use for break and continue.
1509 BreakContinueStack.push_back(Elt: BreakContinue(LoopExit, Continue));
1510
1511 {
1512 // Create a separate cleanup scope for the loop variable and body.
1513 LexicalScope BodyScope(*this, S.getSourceRange());
1514 EmitStmt(S: S.getLoopVarStmt());
1515 EmitStmt(S: S.getBody());
1516 }
1517 // The last block in the loop's body (which unconditionally branches to the
1518 // `inc` block if there is one).
1519 auto *FinalBodyBB = Builder.GetInsertBlock();
1520
1521 EmitStopPoint(S: &S);
1522 // If there is an increment, emit it next.
1523 EmitBlock(BB: Continue.getBlock());
1524 EmitStmt(S: S.getInc());
1525
1526 BreakContinueStack.pop_back();
1527
1528 EmitBranch(Target: CondBlock);
1529
1530 ForScope.ForceCleanup();
1531
1532 LoopStack.pop();
1533
1534 // Emit the fall-through block.
1535 EmitBlock(BB: LoopExit.getBlock(), IsFinished: true);
1536
1537 // When single byte coverage mode is enabled, add a counter to the
1538 // continuation block.
1539 if (llvm::EnableSingleByteCoverage)
1540 incrementProfileCounter(S: &S);
1541
1542 if (CGM.shouldEmitConvergenceTokens())
1543 ConvergenceTokenStack.pop_back();
1544
1545 if (FinalBodyBB) {
1546 // We want the for statement's closing brace to be steppable, to match
1547 // existing behaviour.
1548 addInstToNewSourceAtom(KeyInstruction: FinalBodyBB->getTerminator(), Backup: nullptr);
1549 }
1550}
1551
1552void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
1553 if (RV.isScalar()) {
1554 Builder.CreateStore(Val: RV.getScalarVal(), Addr: ReturnValue);
1555 } else if (RV.isAggregate()) {
1556 LValue Dest = MakeAddrLValue(Addr: ReturnValue, T: Ty);
1557 LValue Src = MakeAddrLValue(Addr: RV.getAggregateAddress(), T: Ty);
1558 EmitAggregateCopy(Dest, Src, EltTy: Ty, MayOverlap: getOverlapForReturnValue());
1559 } else {
1560 EmitStoreOfComplex(V: RV.getComplexVal(), dest: MakeAddrLValue(Addr: ReturnValue, T: Ty),
1561 /*init*/ isInit: true);
1562 }
1563 EmitBranchThroughCleanup(Dest: ReturnBlock);
1564}
1565
1566namespace {
1567// RAII struct used to save and restore a return statement's result expression.
1568struct SaveRetExprRAII {
1569 SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
1570 : OldRetExpr(CGF.RetExpr), CGF(CGF) {
1571 CGF.RetExpr = RetExpr;
1572 }
1573 ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
1574 const Expr *OldRetExpr;
1575 CodeGenFunction &CGF;
1576};
1577} // namespace
1578
1579/// Determine if the given call uses the swiftasync calling convention.
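/// A hedged example (names are illustrative, not from the original source):
/// given "__attribute__((swiftasynccall)) void f(void);", a direct call "f()"
/// reaches the function-pointer path below (after decay) and reports the
/// swiftasync convention; the same applies to calls through block pointers,
/// member function pointers, and plain member calls.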
1580static bool isSwiftAsyncCallee(const CallExpr *CE) {
1581 auto calleeQualType = CE->getCallee()->getType();
1582 const FunctionType *calleeType = nullptr;
1583 if (calleeQualType->isFunctionPointerType() ||
1584 calleeQualType->isFunctionReferenceType() ||
1585 calleeQualType->isBlockPointerType() ||
1586 calleeQualType->isMemberFunctionPointerType()) {
1587 calleeType = calleeQualType->getPointeeType()->castAs<FunctionType>();
1588 } else if (auto *ty = dyn_cast<FunctionType>(Val&: calleeQualType)) {
1589 calleeType = ty;
1590 } else if (auto CMCE = dyn_cast<CXXMemberCallExpr>(Val: CE)) {
1591 if (auto methodDecl = CMCE->getMethodDecl()) {
1592 // getMethodDecl() doesn't handle member pointers at the moment.
1593 calleeType = methodDecl->getType()->castAs<FunctionType>();
1594 } else {
1595 return false;
1596 }
1597 } else {
1598 return false;
1599 }
1600 return calleeType->getCallConv() == CallingConv::CC_SwiftAsync;
1601}
1602
1603/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
1604/// if the function returns void, or may be missing one if the function returns
1605/// non-void. Fun stuff :).
1606void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
1607 ApplyAtomGroup Grp(getDebugInfo());
1608 if (requiresReturnValueCheck()) {
1609 llvm::Constant *SLoc = EmitCheckSourceLocation(Loc: S.getBeginLoc());
1610 auto *SLocPtr =
1611 new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
1612 llvm::GlobalVariable::PrivateLinkage, SLoc);
1613 SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1614 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV: SLocPtr);
1615 assert(ReturnLocation.isValid() && "No valid return location");
1616 Builder.CreateStore(Val: SLocPtr, Addr: ReturnLocation);
1617 }
1618
1619 // Returning from an outlined SEH helper is UB, and we already warn on it.
1620 if (IsOutlinedSEHHelper) {
1621 Builder.CreateUnreachable();
1622 Builder.ClearInsertionPoint();
1623 }
1624
1625 // Emit the result value, even if unused, to evaluate the side effects.
1626 const Expr *RV = S.getRetValue();
1627
1628 // Record the result expression of the return statement. The recorded
1629 // expression is used to determine whether a block capture's lifetime should
1630 // end at the end of the full expression as opposed to the end of the scope
1631 // enclosing the block expression.
1632 //
1633 // This permits a small, easily-implemented exception to our over-conservative
1634 // rules about not jumping to statements following block literals with
1635 // non-trivial cleanups.
1636 SaveRetExprRAII SaveRetExpr(RV, *this);
1637
1638 RunCleanupsScope cleanupScope(*this);
1639 if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(Val: RV))
1640 RV = EWC->getSubExpr();
1641
1642 // If we're in a swiftasynccall function, and the return expression is a
1643 // call to a swiftasynccall function, mark the call as the musttail call.
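  // For instance (illustrative only): inside a swiftasynccall function,
  //   return other_async(args);   // other_async is also swiftasynccall
  // is emitted as a musttail call so the async context is forwarded without
  // growing the stack.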
1644 std::optional<llvm::SaveAndRestore<const CallExpr *>> SaveMustTail;
1645 if (RV && CurFnInfo &&
1646 CurFnInfo->getASTCallingConvention() == CallingConv::CC_SwiftAsync) {
1647 if (auto CE = dyn_cast<CallExpr>(Val: RV)) {
1648 if (isSwiftAsyncCallee(CE)) {
1649 SaveMustTail.emplace(args&: MustTailCall, args&: CE);
1650 }
1651 }
1652 }
1653
1654 // FIXME: Clean this up by using an LValue for ReturnTemp,
1655 // EmitStoreThroughLValue, and EmitAnyExpr.
1656 // Check if the NRVO candidate was not globalized in OpenMP mode.
1657 if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
1658 S.getNRVOCandidate()->isNRVOVariable() &&
1659 (!getLangOpts().OpenMP ||
1660 !CGM.getOpenMPRuntime()
1661 .getAddressOfLocalVariable(CGF&: *this, VD: S.getNRVOCandidate())
1662 .isValid())) {
1663 // Apply the named return value optimization for this return statement,
1664 // which means doing nothing: the appropriate result has already been
1665 // constructed into the NRVO variable.
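  // A small illustrative case: in "X f() { X x; ...; return x; }" the local
  // 'x' is constructed directly in the return slot, so this return emits no
  // copy or move at all.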
1666
1667 // If there is an NRVO flag for this variable, set it to 1 to indicate
1668 // that the cleanup code should not destroy the variable.
1669 if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
1670 Builder.CreateFlagStore(Value: Builder.getTrue(), Addr: NRVOFlag);
1671 } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
1672 // Make sure not to return anything, but evaluate the expression
1673 // for side effects.
1674 if (RV) {
1675 EmitAnyExpr(E: RV);
1676 }
1677 } else if (!RV) {
1678 // Do nothing (return value is left uninitialized)
1679 } else if (FnRetTy->isReferenceType()) {
1680 // If this function returns a reference, take the address of the expression
1681 // rather than the value.
1682 RValue Result = EmitReferenceBindingToExpr(E: RV);
1683 auto *I = Builder.CreateStore(Val: Result.getScalarVal(), Addr: ReturnValue);
1684 addInstToCurrentSourceAtom(KeyInstruction: I, Backup: I->getValueOperand());
1685 } else {
1686 switch (getEvaluationKind(T: RV->getType())) {
1687 case TEK_Scalar: {
1688 llvm::Value *Ret = EmitScalarExpr(E: RV);
1689 if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) {
1690 EmitStoreOfScalar(value: Ret, lvalue: MakeAddrLValue(Addr: ReturnValue, T: RV->getType()),
1691 /*isInit*/ true);
1692 } else {
1693 auto *I = Builder.CreateStore(Val: Ret, Addr: ReturnValue);
1694 addInstToCurrentSourceAtom(KeyInstruction: I, Backup: I->getValueOperand());
1695 }
1696 break;
1697 }
1698 case TEK_Complex:
1699 EmitComplexExprIntoLValue(E: RV, dest: MakeAddrLValue(Addr: ReturnValue, T: RV->getType()),
1700 /*isInit*/ true);
1701 break;
1702 case TEK_Aggregate:
1703 EmitAggExpr(E: RV, AS: AggValueSlot::forAddr(
1704 addr: ReturnValue, quals: Qualifiers(),
1705 isDestructed: AggValueSlot::IsDestructed,
1706 needsGC: AggValueSlot::DoesNotNeedGCBarriers,
1707 isAliased: AggValueSlot::IsNotAliased,
1708 mayOverlap: getOverlapForReturnValue()));
1709 break;
1710 }
1711 }
1712
1713 ++NumReturnExprs;
1714 if (!RV || RV->isEvaluatable(Ctx: getContext()))
1715 ++NumSimpleReturnExprs;
1716
1717 cleanupScope.ForceCleanup();
1718 EmitBranchThroughCleanup(Dest: ReturnBlock);
1719}
1720
1721void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
1722 // As long as debug info is modeled with instructions, we have to ensure we
1723 // have a place to insert here and write the stop point here.
1724 if (HaveInsertPoint())
1725 EmitStopPoint(S: &S);
1726
1727 for (const auto *I : S.decls())
1728 EmitDecl(D: *I, /*EvaluateConditionDecl=*/true);
1729}
1730
1731void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
1732 assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");
1733
1734 // If this code is reachable then emit a stop point (if generating
1735 // debug info). We have to do this ourselves because we are on the
1736 // "simple" statement path.
1737 if (HaveInsertPoint())
1738 EmitStopPoint(S: &S);
1739
1740 ApplyAtomGroup Grp(getDebugInfo());
1741 EmitBranchThroughCleanup(Dest: BreakContinueStack.back().BreakBlock);
1742}
1743
1744void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
1745 assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");
1746
1747 // If this code is reachable then emit a stop point (if generating
1748 // debug info). We have to do this ourselves because we are on the
1749 // "simple" statement path.
1750 if (HaveInsertPoint())
1751 EmitStopPoint(S: &S);
1752
1753 ApplyAtomGroup Grp(getDebugInfo());
1754 EmitBranchThroughCleanup(Dest: BreakContinueStack.back().ContinueBlock);
1755}
1756
1757/// EmitCaseStmtRange - If the case statement's range is not too big, add
1758/// multiple cases to the switch instruction, one for each value within the
1759/// range. If the range is too big, emit an "if" condition check instead.
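///
/// For example (illustrative): "case 1 ... 40:" (a GNU case range) is small
/// enough to be expanded into 40 individual switch cases, whereas
/// "case 1 ... 100000:" is emitted as an explicit range check branching to
/// the case body.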
1760void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S,
1761 ArrayRef<const Attr *> Attrs) {
1762 assert(S.getRHS() && "Expected RHS value in CaseStmt");
1763
1764 llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(Ctx: getContext());
1765 llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(Ctx: getContext());
1766
1767 // Emit the code for this case. We do this first to make sure it is
1768 // properly chained from our predecessor before generating the
1769 // switch machinery to enter this block.
1770 llvm::BasicBlock *CaseDest = createBasicBlock(name: "sw.bb");
1771 EmitBlockWithFallThrough(BB: CaseDest, S: &S);
1772 EmitStmt(S: S.getSubStmt());
1773
1774 // If range is empty, do nothing.
1775 if (LHS.isSigned() ? RHS.slt(RHS: LHS) : RHS.ult(RHS: LHS))
1776 return;
1777
1778 Stmt::Likelihood LH = Stmt::getLikelihood(Attrs);
1779 llvm::APInt Range = RHS - LHS;
1780 // FIXME: parameters such as this should not be hardcoded.
1781 if (Range.ult(RHS: llvm::APInt(Range.getBitWidth(), 64))) {
1782 // Range is small enough to add multiple switch instruction cases.
1783 uint64_t Total = getProfileCount(S: &S);
1784 unsigned NCases = Range.getZExtValue() + 1;
1785 // We only have one region counter for the entire set of cases here, so we
1786 // need to divide the weights evenly between the generated cases, ensuring
1787 // that the total weight is preserved. E.g., a weight of 5 over three cases
1788 // will be distributed as weights of 2, 2, and 1.
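    // (Worked through: Total = 5 and NCases = 3 give Weight = 1, Rem = 2, so
    // the first two cases get 1 + 1 = 2 and the last gets 1.)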
1789 uint64_t Weight = Total / NCases, Rem = Total % NCases;
1790 for (unsigned I = 0; I != NCases; ++I) {
1791 if (SwitchWeights)
1792 SwitchWeights->push_back(Elt: Weight + (Rem ? 1 : 0));
1793 else if (SwitchLikelihood)
1794 SwitchLikelihood->push_back(Elt: LH);
1795
1796 if (Rem)
1797 Rem--;
1798 SwitchInsn->addCase(OnVal: Builder.getInt(AI: LHS), Dest: CaseDest);
1799 ++LHS;
1800 }
1801 return;
1802 }
1803
1804 // The range is too big. Emit "if" condition into a new block,
1805 // making sure to save and restore the current insertion point.
1806 llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();
1807
1808 // Push this test onto the chain of range checks (which terminates
1809 // in the default basic block). The switch's default will be changed
1810 // to the top of this chain after switch emission is complete.
1811 llvm::BasicBlock *FalseDest = CaseRangeBlock;
1812 CaseRangeBlock = createBasicBlock(name: "sw.caserange");
1813
1814 CurFn->insert(Position: CurFn->end(), BB: CaseRangeBlock);
1815 Builder.SetInsertPoint(CaseRangeBlock);
1816
1817 // Emit range check.
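  // For a range such as "case 10 ... 1000:" this is, roughly, the single
  // unsigned comparison "(cond - 10) <= 990", covering the whole range with
  // one subtract and one compare.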
1818 llvm::Value *Diff =
1819 Builder.CreateSub(LHS: SwitchInsn->getCondition(), RHS: Builder.getInt(AI: LHS));
1820 llvm::Value *Cond =
1821 Builder.CreateICmpULE(LHS: Diff, RHS: Builder.getInt(AI: Range), Name: "inbounds");
1822
1823 llvm::MDNode *Weights = nullptr;
1824 if (SwitchWeights) {
1825 uint64_t ThisCount = getProfileCount(S: &S);
1826 uint64_t DefaultCount = (*SwitchWeights)[0];
1827 Weights = createProfileWeights(TrueCount: ThisCount, FalseCount: DefaultCount);
1828
1829 // Since we're chaining the switch default through each large case range, we
1830 // need to update the weight for the default, i.e., the first case, to
1831 // include this case.
1832 (*SwitchWeights)[0] += ThisCount;
1833 } else if (SwitchLikelihood)
1834 Cond = emitCondLikelihoodViaExpectIntrinsic(Cond, LH);
1835
1836 Builder.CreateCondBr(Cond, True: CaseDest, False: FalseDest, BranchWeights: Weights);
1837
1838 // Restore the appropriate insertion point.
1839 if (RestoreBB)
1840 Builder.SetInsertPoint(RestoreBB);
1841 else
1842 Builder.ClearInsertionPoint();
1843}
1844
1845void CodeGenFunction::EmitCaseStmt(const CaseStmt &S,
1846 ArrayRef<const Attr *> Attrs) {
1847 // If there is no enclosing switch instance that we're aware of, then this
1848 // case statement and its block can be elided. This situation only happens
1849 // when we've constant-folded the switch, are emitting the constant case,
1850 // and part of the constant case includes another case statement. For
1851 // instance: switch (4) { case 4: do { case 5: } while (1); }
1852 if (!SwitchInsn) {
1853 EmitStmt(S: S.getSubStmt());
1854 return;
1855 }
1856
1857 // Handle case ranges.
1858 if (S.getRHS()) {
1859 EmitCaseStmtRange(S, Attrs);
1860 return;
1861 }
1862
1863 llvm::ConstantInt *CaseVal =
1864 Builder.getInt(AI: S.getLHS()->EvaluateKnownConstInt(Ctx: getContext()));
1865
1866 // Emit debuginfo for the case value if it is an enum value.
1867 const ConstantExpr *CE;
1868 if (auto ICE = dyn_cast<ImplicitCastExpr>(Val: S.getLHS()))
1869 CE = dyn_cast<ConstantExpr>(Val: ICE->getSubExpr());
1870 else
1871 CE = dyn_cast<ConstantExpr>(Val: S.getLHS());
1872 if (CE) {
1873 if (auto DE = dyn_cast<DeclRefExpr>(Val: CE->getSubExpr()))
1874 if (CGDebugInfo *Dbg = getDebugInfo())
1875 if (CGM.getCodeGenOpts().hasReducedDebugInfo())
1876 Dbg->EmitGlobalVariable(VD: DE->getDecl(),
1877 Init: APValue(llvm::APSInt(CaseVal->getValue())));
1878 }
1879
1880 if (SwitchLikelihood)
1881 SwitchLikelihood->push_back(Elt: Stmt::getLikelihood(Attrs));
1882
1883 // If the body of the case is just a 'break', try to not emit an empty block.
1884 // If we're profiling or we're not optimizing, leave the block in for better
1885 // debug and coverage analysis.
1886 if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
1887 CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1888 isa<BreakStmt>(Val: S.getSubStmt())) {
1889 JumpDest Block = BreakContinueStack.back().BreakBlock;
1890
1891 // Only do this optimization if there are no cleanups that need emitting.
1892 if (isObviouslyBranchWithoutCleanups(Dest: Block)) {
1893 if (SwitchWeights)
1894 SwitchWeights->push_back(Elt: getProfileCount(S: &S));
1895 SwitchInsn->addCase(OnVal: CaseVal, Dest: Block.getBlock());
1896
1897 // If there was a fallthrough into this case, make sure to redirect it to
1898 // the end of the switch as well.
1899 if (Builder.GetInsertBlock()) {
1900 Builder.CreateBr(Dest: Block.getBlock());
1901 Builder.ClearInsertionPoint();
1902 }
1903 return;
1904 }
1905 }
1906
1907 llvm::BasicBlock *CaseDest = createBasicBlock(name: "sw.bb");
1908 EmitBlockWithFallThrough(BB: CaseDest, S: &S);
1909 if (SwitchWeights)
1910 SwitchWeights->push_back(Elt: getProfileCount(S: &S));
1911 SwitchInsn->addCase(OnVal: CaseVal, Dest: CaseDest);
1912
1913 // Recursively emitting the statement is acceptable, but is not wonderful for
1914 // code where we have many case statements nested together, i.e.:
1915 // case 1:
1916 // case 2:
1917 // case 3: etc.
1918 // Handling this recursively will create a new block for each case statement
1919 // that falls through to the next case which is IR intensive. It also causes
1920 // deep recursion which can run into stack depth limitations. Handle
1921 // sequential non-range case statements specially.
1922 //
1923 // TODO When the next case has a likelihood attribute the code returns to the
1924 // recursive algorithm. Maybe improve this case if it becomes common practice
1925 // to use a lot of attributes.
1926 const CaseStmt *CurCase = &S;
1927 const CaseStmt *NextCase = dyn_cast<CaseStmt>(Val: S.getSubStmt());
1928
1929 // Otherwise, iteratively add consecutive cases to this switch stmt.
1930 while (NextCase && NextCase->getRHS() == nullptr) {
1931 CurCase = NextCase;
1932 llvm::ConstantInt *CaseVal =
1933 Builder.getInt(AI: CurCase->getLHS()->EvaluateKnownConstInt(Ctx: getContext()));
1934
1935 if (SwitchWeights)
1936 SwitchWeights->push_back(Elt: getProfileCount(S: NextCase));
1937 if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
1938 CaseDest = createBasicBlock(name: "sw.bb");
1939 EmitBlockWithFallThrough(BB: CaseDest, S: CurCase);
1940 }
1941 // Since this loop is only executed when the CaseStmt has no attributes,
1942 // use a hard-coded likelihood value.
1943 if (SwitchLikelihood)
1944 SwitchLikelihood->push_back(Elt: Stmt::LH_None);
1945
1946 SwitchInsn->addCase(OnVal: CaseVal, Dest: CaseDest);
1947 NextCase = dyn_cast<CaseStmt>(Val: CurCase->getSubStmt());
1948 }
1949
1950 // Generate a stop point for debug info if the case statement is
1951 // followed by a default statement. A fallthrough case before a
1952 // default case gets its own branch target.
1953 if (CurCase->getSubStmt()->getStmtClass() == Stmt::DefaultStmtClass)
1954 EmitStopPoint(S: CurCase);
1955
1956 // Normal default recursion for non-cases.
1957 EmitStmt(S: CurCase->getSubStmt());
1958}
1959
1960void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S,
1961 ArrayRef<const Attr *> Attrs) {
1962 // If there is no enclosing switch instance that we're aware of, then this
1963 // default statement can be elided. This situation only happens when we've
1964 // constant-folded the switch.
1965 if (!SwitchInsn) {
1966 EmitStmt(S: S.getSubStmt());
1967 return;
1968 }
1969
1970 llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
1971 assert(DefaultBlock->empty() &&
1972 "EmitDefaultStmt: Default block already defined?");
1973
1974 if (SwitchLikelihood)
1975 SwitchLikelihood->front() = Stmt::getLikelihood(Attrs);
1976
1977 EmitBlockWithFallThrough(BB: DefaultBlock, S: &S);
1978
1979 EmitStmt(S: S.getSubStmt());
1980}
1981
1982/// CollectStatementsForCase - Given the body of a 'switch' statement and a
1983/// constant value that is being switched on, see if we can dead code eliminate
1984/// the body of the switch to a simple series of statements to emit. Basically,
1985/// on a switch (5) we want to find these statements:
1986/// case 5:
1987/// printf(...); <--
1988/// ++i; <--
1989/// break;
1990///
1991/// and add them to the ResultStmts vector. If it is unsafe to do this
1992/// transformation (for example, one of the elided statements contains a label
1993/// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
1994/// should include statements after it (e.g. the printf() line is a substmt of
1995/// the case) then return CSFC_FallThrough. If we handled it and found a break
1996/// statement, then return CSFC_Success.
1997///
1998/// If Case is non-null, then we are looking for the specified case, checking
1999/// that nothing we jump over contains labels. If Case is null, then we found
2000/// the case and are looking for the break.
2001///
2002/// If the recursive walk actually finds our Case, then we set FoundCase to
2003/// true.
2004///
2005enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
2006static CSFC_Result CollectStatementsForCase(const Stmt *S,
2007 const SwitchCase *Case,
2008 bool &FoundCase,
2009 SmallVectorImpl<const Stmt*> &ResultStmts) {
2010 // If this is a null statement, just succeed.
2011 if (!S)
2012 return Case ? CSFC_Success : CSFC_FallThrough;
2013
2014 // If this is the switchcase (case 4: or default) that we're looking for, then
2015 // we're in business. Just add the substatement.
2016 if (const SwitchCase *SC = dyn_cast<SwitchCase>(Val: S)) {
2017 if (S == Case) {
2018 FoundCase = true;
2019 return CollectStatementsForCase(S: SC->getSubStmt(), Case: nullptr, FoundCase,
2020 ResultStmts);
2021 }
2022
2023 // Otherwise, this is some other case or default statement, just ignore it.
2024 return CollectStatementsForCase(S: SC->getSubStmt(), Case, FoundCase,
2025 ResultStmts);
2026 }
2027
2028 // If we are in the live part of the code and we found our break statement,
2029 // return a success!
2030 if (!Case && isa<BreakStmt>(Val: S))
2031 return CSFC_Success;
2032
2033 // If this is a compound statement, then it might contain the SwitchCase,
2034 // the break, or neither.
2035 if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(Val: S)) {
2036 // Handle this as two cases: we might be looking for the SwitchCase (if so
2037 // the skipped statements must be skippable) or we might already have it.
2038 CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
2039 bool StartedInLiveCode = FoundCase;
2040 unsigned StartSize = ResultStmts.size();
2041
2042 // If we've not found the case yet, scan through looking for it.
2043 if (Case) {
2044 // Keep track of whether we see a skipped declaration. The code could be
2045 // using the declaration even if it is skipped, so we can't optimize out
2046 // the decl if the kept statements might refer to it.
2047 bool HadSkippedDecl = false;
2048
2049 // If we're looking for the case, just see if we can skip each of the
2050 // substatements.
2051 for (; Case && I != E; ++I) {
2052 HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(S: *I);
2053
2054 switch (CollectStatementsForCase(S: *I, Case, FoundCase, ResultStmts)) {
2055 case CSFC_Failure: return CSFC_Failure;
2056 case CSFC_Success:
2057 // A successful result means that either 1) the statement doesn't
2058 // contain the case and is skippable, or 2) it contains the case value
2059 // and also the break that exits the switch. In the latter case,
2060 // we just verify that the rest of the statements are elidable.
2061 if (FoundCase) {
2062 // If we found the case and skipped declarations, we can't do the
2063 // optimization.
2064 if (HadSkippedDecl)
2065 return CSFC_Failure;
2066
2067 for (++I; I != E; ++I)
2068 if (CodeGenFunction::ContainsLabel(S: *I, IgnoreCaseStmts: true))
2069 return CSFC_Failure;
2070 return CSFC_Success;
2071 }
2072 break;
2073 case CSFC_FallThrough:
2074 // If we have a fallthrough condition, then we must have found the
2075 // case and started to include statements. Consider the rest of the
2076 // statements in the compound statement as candidates for inclusion.
2077 assert(FoundCase && "Didn't find case but returned fallthrough?");
2078 // We recursively found Case, so we're not looking for it anymore.
2079 Case = nullptr;
2080
2081 // If we found the case and skipped declarations, we can't do the
2082 // optimization.
2083 if (HadSkippedDecl)
2084 return CSFC_Failure;
2085 break;
2086 }
2087 }
2088
2089 if (!FoundCase)
2090 return CSFC_Success;
2091
2092 assert(!HadSkippedDecl && "fallthrough after skipping decl");
2093 }
2094
2095 // If we have statements in our range, then we know that the statements are
2096 // live and need to be added to the set of statements we're tracking.
2097 bool AnyDecls = false;
2098 for (; I != E; ++I) {
2099 AnyDecls |= CodeGenFunction::mightAddDeclToScope(S: *I);
2100
2101 switch (CollectStatementsForCase(S: *I, Case: nullptr, FoundCase, ResultStmts)) {
2102 case CSFC_Failure: return CSFC_Failure;
2103 case CSFC_FallThrough:
2104 // A fallthrough result means that the statement was simple and was just
2105 // included in ResultStmts; keep adding statements afterwards.
2106 break;
2107 case CSFC_Success:
2108 // A successful result means that we found the break statement and
2109 // stopped statement inclusion. We just ensure that any leftover stmts
2110 // are skippable and return success ourselves.
2111 for (++I; I != E; ++I)
2112 if (CodeGenFunction::ContainsLabel(S: *I, IgnoreCaseStmts: true))
2113 return CSFC_Failure;
2114 return CSFC_Success;
2115 }
2116 }
2117
2118 // If we're about to fall out of a scope without hitting a 'break;', we
2119 // can't perform the optimization if there were any decls in that scope
2120 // (we'd lose their end-of-lifetime).
2121 if (AnyDecls) {
2122 // If the entire compound statement was live, there's one more thing we
2123 // can try before giving up: emit the whole thing as a single statement.
2124 // We can do that unless the statement contains a 'break;'.
2125 // FIXME: Such a break must be at the end of a construct within this one.
2126 // We could emit this by just ignoring the BreakStmts entirely.
2127 if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
2128 ResultStmts.resize(N: StartSize);
2129 ResultStmts.push_back(Elt: S);
2130 } else {
2131 return CSFC_Failure;
2132 }
2133 }
2134
2135 return CSFC_FallThrough;
2136 }
2137
2138 // Okay, this is some other statement that we don't handle explicitly, like a
2139 // for statement or increment etc. If we are skipping over this statement,
2140 // just verify it doesn't have labels, which would make it invalid to elide.
2141 if (Case) {
2142 if (CodeGenFunction::ContainsLabel(S, IgnoreCaseStmts: true))
2143 return CSFC_Failure;
2144 return CSFC_Success;
2145 }
2146
2147 // Otherwise, we want to include this statement. Everything is cool with that
2148 // so long as it doesn't contain a break out of the switch we're in.
2149 if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
2150
2151 // Otherwise, everything is great. Include the statement and tell the caller
2152 // that we fall through and include the next statement as well.
2153 ResultStmts.push_back(Elt: S);
2154 return CSFC_FallThrough;
2155}
2156
2157/// FindCaseStatementsForValue - Find the case statement being jumped to and
2158/// then invoke CollectStatementsForCase to find the list of statements to emit
2159/// for a switch on constant. See the comment above CollectStatementsForCase
2160/// for more details.
2161static bool FindCaseStatementsForValue(const SwitchStmt &S,
2162 const llvm::APSInt &ConstantCondValue,
2163 SmallVectorImpl<const Stmt*> &ResultStmts,
2164 ASTContext &C,
2165 const SwitchCase *&ResultCase) {
2166 // First step, find the switch case that is being branched to. We can do this
2167 // efficiently by scanning the SwitchCase list.
2168 const SwitchCase *Case = S.getSwitchCaseList();
2169 const DefaultStmt *DefaultCase = nullptr;
2170
2171 for (; Case; Case = Case->getNextSwitchCase()) {
2172 // It's either a default or case. Just remember the default statement in
2173 // case we're not jumping to any numbered cases.
2174 if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Val: Case)) {
2175 DefaultCase = DS;
2176 continue;
2177 }
2178
2179 // Check to see if this case is the one we're looking for.
2180 const CaseStmt *CS = cast<CaseStmt>(Val: Case);
2181 // Don't handle case ranges yet.
2182 if (CS->getRHS()) return false;
2183
2184 // If we found our case, remember it as 'case'.
2185 if (CS->getLHS()->EvaluateKnownConstInt(Ctx: C) == ConstantCondValue)
2186 break;
2187 }
2188
2189 // If we didn't find a matching case, we use a default if it exists, or we
2190 // elide the whole switch body!
2191 if (!Case) {
2192 // It is safe to elide the body of the switch if it doesn't contain labels
2193 // etc. If it is safe, return successfully with an empty ResultStmts list.
2194 if (!DefaultCase)
2195 return !CodeGenFunction::ContainsLabel(S: &S);
2196 Case = DefaultCase;
2197 }
2198
2199 // Ok, we know which case is being jumped to, try to collect all the
2200 // statements that follow it. This can fail for a variety of reasons. Also,
2201 // check to see that the recursive walk actually found our case statement.
2202 // Insane cases like this can fail to find it in the recursive walk since we
2203 // don't handle every stmt kind:
2204 // switch (4) {
2205 // while (1) {
2206 // case 4: ...
2207 bool FoundCase = false;
2208 ResultCase = Case;
2209 return CollectStatementsForCase(S: S.getBody(), Case, FoundCase,
2210 ResultStmts) != CSFC_Failure &&
2211 FoundCase;
2212}
2213
2214static std::optional<SmallVector<uint64_t, 16>>
2215getLikelihoodWeights(ArrayRef<Stmt::Likelihood> Likelihoods) {
2216 // Are there enough branches to weight them?
2217 if (Likelihoods.size() <= 1)
2218 return std::nullopt;
2219
2220 uint64_t NumUnlikely = 0;
2221 uint64_t NumNone = 0;
2222 uint64_t NumLikely = 0;
2223 for (const auto LH : Likelihoods) {
2224 switch (LH) {
2225 case Stmt::LH_Unlikely:
2226 ++NumUnlikely;
2227 break;
2228 case Stmt::LH_None:
2229 ++NumNone;
2230 break;
2231 case Stmt::LH_Likely:
2232 ++NumLikely;
2233 break;
2234 }
2235 }
2236
2237 // Is there a likelihood attribute used?
2238 if (NumUnlikely == 0 && NumLikely == 0)
2239 return std::nullopt;
2240
2241 // When multiple cases share the same code they can be combined during
2242 // optimization. In that case the weights of the branch will be the sum of
2243 // the individual weights. Make sure the combined sum of all neutral cases
2244 // doesn't exceed the value of a single likely attribute.
2245 // The additions both avoid divisions by 0 and make sure the weights of None
2246 // don't exceed the weight of Likely.
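  // A worked example (illustrative numbers): with one [[likely]] case, two
  // plain cases, and no [[unlikely]] case, Likely = INT32_MAX / 3 = 715827882,
  // None = Likely / 3 = 238609294, and Unlikely = 0, so even if both plain
  // cases are merged their combined weight stays below the likely case.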
2247 const uint64_t Likely = INT32_MAX / (NumLikely + 2);
2248 const uint64_t None = Likely / (NumNone + 1);
2249 const uint64_t Unlikely = 0;
2250
2251 SmallVector<uint64_t, 16> Result;
2252 Result.reserve(N: Likelihoods.size());
2253 for (const auto LH : Likelihoods) {
2254 switch (LH) {
2255 case Stmt::LH_Unlikely:
2256 Result.push_back(Elt: Unlikely);
2257 break;
2258 case Stmt::LH_None:
2259 Result.push_back(Elt: None);
2260 break;
2261 case Stmt::LH_Likely:
2262 Result.push_back(Elt: Likely);
2263 break;
2264 }
2265 }
2266
2267 return Result;
2268}
2269
2270void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
2271 // Handle nested switch statements.
2272 llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
2273 SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
2274 SmallVector<Stmt::Likelihood, 16> *SavedSwitchLikelihood = SwitchLikelihood;
2275 llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
2276
2277 // See if we can constant fold the condition of the switch and therefore only
2278 // emit the live case statement (if any) of the switch.
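  // For instance (illustrative): "switch (5) { case 5: f(); break; default:
  // g(); }" folds to just the call to f(); the rest of the body is never
  // emitted.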
2279 llvm::APSInt ConstantCondValue;
2280 if (ConstantFoldsToSimpleInteger(Cond: S.getCond(), Result&: ConstantCondValue)) {
2281 SmallVector<const Stmt*, 4> CaseStmts;
2282 const SwitchCase *Case = nullptr;
2283 if (FindCaseStatementsForValue(S, ConstantCondValue, ResultStmts&: CaseStmts,
2284 C&: getContext(), ResultCase&: Case)) {
2285 if (Case)
2286 incrementProfileCounter(S: Case);
2287 RunCleanupsScope ExecutedScope(*this);
2288
2289 if (S.getInit())
2290 EmitStmt(S: S.getInit());
2291
2292 // Emit the condition variable if needed inside the entire cleanup scope
2293 // used by this special case for constant folded switches.
2294 if (S.getConditionVariable())
2295 EmitDecl(D: *S.getConditionVariable(), /*EvaluateConditionDecl=*/true);
2296
2297 // At this point, we are no longer "within" a switch instance, so
2298 // we can temporarily enforce this to ensure that any embedded case
2299 // statements are not emitted.
2300 SwitchInsn = nullptr;
2301
2302 // Okay, we can dead code eliminate everything except this case. Emit the
2303 // specified series of statements and we're good.
2304 for (const Stmt *CaseStmt : CaseStmts)
2305 EmitStmt(S: CaseStmt);
2306 incrementProfileCounter(S: &S);
2307 PGO->markStmtMaybeUsed(S: S.getBody());
2308
2309 // Now we want to restore the saved switch instance so that nested
2310 // switches continue to function properly.
2311 SwitchInsn = SavedSwitchInsn;
2312
2313 return;
2314 }
2315 }
2316
2317 JumpDest SwitchExit = getJumpDestInCurrentScope(Name: "sw.epilog");
2318
2319 RunCleanupsScope ConditionScope(*this);
2320
2321 if (S.getInit())
2322 EmitStmt(S: S.getInit());
2323
2324 if (S.getConditionVariable())
2325 EmitDecl(D: *S.getConditionVariable());
2326 llvm::Value *CondV = EmitScalarExpr(E: S.getCond());
2327 MaybeEmitDeferredVarDeclInit(var: S.getConditionVariable());
2328
2329 // Create basic block to hold stuff that comes after switch
2330 // statement. We also need to create a default block now so that
2331 // explicit case range tests can have a place to jump to on
2332 // failure.
2333 llvm::BasicBlock *DefaultBlock = createBasicBlock(name: "sw.default");
2334 SwitchInsn = Builder.CreateSwitch(V: CondV, Dest: DefaultBlock);
2335 addInstToNewSourceAtom(KeyInstruction: SwitchInsn, Backup: CondV);
2336
2337 if (HLSLControlFlowAttr != HLSLControlFlowHintAttr::SpellingNotCalculated) {
2338 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2339 llvm::ConstantInt *BranchHintConstant =
2340 HLSLControlFlowAttr ==
2341 HLSLControlFlowHintAttr::Spelling::Microsoft_branch
2342 ? llvm::ConstantInt::get(Ty: CGM.Int32Ty, V: 1)
2343 : llvm::ConstantInt::get(Ty: CGM.Int32Ty, V: 2);
2344 llvm::Metadata *Vals[] = {MDHelper.createString(Str: "hlsl.controlflow.hint"),
2345 MDHelper.createConstant(C: BranchHintConstant)};
2346 SwitchInsn->setMetadata(Kind: "hlsl.controlflow.hint",
2347 Node: llvm::MDNode::get(Context&: CGM.getLLVMContext(), MDs: Vals));
2348 }
2349
2350 if (PGO->haveRegionCounts()) {
2351 // Walk the SwitchCase list to find how many there are.
2352 uint64_t DefaultCount = 0;
2353 unsigned NumCases = 0;
2354 for (const SwitchCase *Case = S.getSwitchCaseList();
2355 Case;
2356 Case = Case->getNextSwitchCase()) {
2357 if (isa<DefaultStmt>(Val: Case))
2358 DefaultCount = getProfileCount(S: Case);
2359 NumCases += 1;
2360 }
2361 SwitchWeights = new SmallVector<uint64_t, 16>();
2362 SwitchWeights->reserve(N: NumCases);
2363 // The default needs to be first. We store the edge count, so we already
2364 // know the right weight.
2365 SwitchWeights->push_back(Elt: DefaultCount);
2366 } else if (CGM.getCodeGenOpts().OptimizationLevel) {
2367 SwitchLikelihood = new SmallVector<Stmt::Likelihood, 16>();
2368 // Initialize the default case.
2369 SwitchLikelihood->push_back(Elt: Stmt::LH_None);
2370 }
2371
2372 CaseRangeBlock = DefaultBlock;
2373
2374 // Clear the insertion point to indicate we are in unreachable code.
2375 Builder.ClearInsertionPoint();
2376
2377 // All break statements jump to SwitchExit. If BreakContinueStack is
2378 // non-empty, reuse the enclosing ContinueBlock for 'continue' statements.
2379 JumpDest OuterContinue;
2380 if (!BreakContinueStack.empty())
2381 OuterContinue = BreakContinueStack.back().ContinueBlock;
2382
2383 BreakContinueStack.push_back(Elt: BreakContinue(SwitchExit, OuterContinue));
2384
2385 // Emit switch body.
2386 EmitStmt(S: S.getBody());
2387
2388 BreakContinueStack.pop_back();
2389
2390 // Update the default block in case explicit case range tests have
2391 // been chained on top.
2392 SwitchInsn->setDefaultDest(CaseRangeBlock);
2393
2394 // If a default was never emitted:
2395 if (!DefaultBlock->getParent()) {
2396 // If we have cleanups, emit the default block so that there's a
2397 // place to jump through the cleanups from.
2398 if (ConditionScope.requiresCleanups()) {
2399 EmitBlock(BB: DefaultBlock);
2400
2401 // Otherwise, just forward the default block to the switch end.
2402 } else {
2403 DefaultBlock->replaceAllUsesWith(V: SwitchExit.getBlock());
2404 delete DefaultBlock;
2405 }
2406 }
2407
2408 ConditionScope.ForceCleanup();
2409
2410 // Emit continuation.
2411 EmitBlock(BB: SwitchExit.getBlock(), IsFinished: true);
2412 incrementProfileCounter(S: &S);
2413
2414 // If the switch has a condition wrapped by __builtin_unpredictable,
2415 // create metadata that specifies that the switch is unpredictable.
2416 // Don't bother if not optimizing because that metadata would not be used.
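  // For example (illustrative): "switch (__builtin_unpredictable(x)) { ... }"
  // gets !unpredictable metadata attached to the emitted switch instruction.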
2417 auto *Call = dyn_cast<CallExpr>(Val: S.getCond());
2418 if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
2419 auto *FD = dyn_cast_or_null<FunctionDecl>(Val: Call->getCalleeDecl());
2420 if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
2421 llvm::MDBuilder MDHelper(getLLVMContext());
2422 SwitchInsn->setMetadata(KindID: llvm::LLVMContext::MD_unpredictable,
2423 Node: MDHelper.createUnpredictable());
2424 }
2425 }
2426
2427 if (SwitchWeights) {
2428 assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
2429 "switch weights do not match switch cases");
2430 // If there's only one jump destination there's no sense weighting it.
2431 if (SwitchWeights->size() > 1)
2432 SwitchInsn->setMetadata(KindID: llvm::LLVMContext::MD_prof,
2433 Node: createProfileWeights(Weights: *SwitchWeights));
2434 delete SwitchWeights;
2435 } else if (SwitchLikelihood) {
2436 assert(SwitchLikelihood->size() == 1 + SwitchInsn->getNumCases() &&
2437 "switch likelihoods do not match switch cases");
2438 std::optional<SmallVector<uint64_t, 16>> LHW =
2439 getLikelihoodWeights(Likelihoods: *SwitchLikelihood);
2440 if (LHW) {
2441 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2442 SwitchInsn->setMetadata(KindID: llvm::LLVMContext::MD_prof,
2443 Node: createProfileWeights(Weights: *LHW));
2444 }
2445 delete SwitchLikelihood;
2446 }
2447 SwitchInsn = SavedSwitchInsn;
2448 SwitchWeights = SavedSwitchWeights;
2449 SwitchLikelihood = SavedSwitchLikelihood;
2450 CaseRangeBlock = SavedCRBlock;
2451}
2452
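// A rough sketch of what the conversion below does (illustrative; details
// depend on the target): the GCC constraint "g" becomes "imr", a
// multi-alternative constraint such as "r,m" becomes "r|m", and a symbolic
// operand reference like "[len]" is rewritten to its numeric operand index.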
2453static std::string
2454SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
2455 SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=nullptr) {
2456 std::string Result;
2457
2458 while (*Constraint) {
2459 switch (*Constraint) {
2460 default:
2461 Result += Target.convertConstraint(Constraint);
2462 break;
2463 // Ignore these
2464 case '*':
2465 case '?':
2466 case '!':
2467 case '=': // Will see this and the following in multi-alternative constraints.
2468 case '+':
2469 break;
2470 case '#': // Ignore the rest of the constraint alternative.
2471 while (Constraint[1] && Constraint[1] != ',')
2472 Constraint++;
2473 break;
2474 case '&':
2475 case '%':
2476 Result += *Constraint;
2477 while (Constraint[1] && Constraint[1] == *Constraint)
2478 Constraint++;
2479 break;
2480 case ',':
2481 Result += "|";
2482 break;
2483 case 'g':
2484 Result += "imr";
2485 break;
2486 case '[': {
2487 assert(OutCons &&
2488 "Must pass output names to constraints with a symbolic name");
2489 unsigned Index;
2490 bool result = Target.resolveSymbolicName(Name&: Constraint, OutputConstraints: *OutCons, Index);
2491 assert(result && "Could not resolve symbolic name"); (void)result;
2492 Result += llvm::utostr(X: Index);
2493 break;
2494 }
2495 }
2496
2497 Constraint++;
2498 }
2499
2500 return Result;
2501}
2502
2503/// AddVariableConstraints - Look at AsmExpr and, if it is a variable declared
2504/// as using a particular register, add that register as a constraint that will
2505/// be used in this asm stmt.
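///
/// For example (illustrative, assuming "r12" is a valid register name on the
/// target): for "register int v asm("r12");" used as an "=r" output, the
/// constraint is rewritten to "{r12}" (or "&{r12}" for an early-clobber
/// output).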
2506static std::string
2507AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
2508 const TargetInfo &Target, CodeGenModule &CGM,
2509 const AsmStmt &Stmt, const bool EarlyClobber,
2510 std::string *GCCReg = nullptr) {
2511 const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(Val: &AsmExpr);
2512 if (!AsmDeclRef)
2513 return Constraint;
2514 const ValueDecl &Value = *AsmDeclRef->getDecl();
2515 const VarDecl *Variable = dyn_cast<VarDecl>(Val: &Value);
2516 if (!Variable)
2517 return Constraint;
2518 if (Variable->getStorageClass() != SC_Register)
2519 return Constraint;
2520 AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
2521 if (!Attr)
2522 return Constraint;
2523 StringRef Register = Attr->getLabel();
2524 assert(Target.isValidGCCRegisterName(Register));
2525 // We're using validateOutputConstraint here because we only care if
2526 // this is a register constraint.
2527 TargetInfo::ConstraintInfo Info(Constraint, "");
2528 if (Target.validateOutputConstraint(Info) &&
2529 !Info.allowsRegister()) {
2530 CGM.ErrorUnsupported(S: &Stmt, Type: "__asm__");
2531 return Constraint;
2532 }
2533 // Canonicalize the register here before returning it.
2534 Register = Target.getNormalizedGCCRegisterName(Name: Register);
2535 if (GCCReg != nullptr)
2536 *GCCReg = Register.str();
2537 return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
2538}
2539
2540std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
2541 const TargetInfo::ConstraintInfo &Info, LValue InputValue,
2542 QualType InputType, std::string &ConstraintStr, SourceLocation Loc) {
2543 if (Info.allowsRegister() || !Info.allowsMemory()) {
2544 if (CodeGenFunction::hasScalarEvaluationKind(T: InputType))
2545 return {EmitLoadOfLValue(V: InputValue, Loc).getScalarVal(), nullptr};
2546
2547 llvm::Type *Ty = ConvertType(T: InputType);
2548 uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
2549 if ((Size <= 64 && llvm::isPowerOf2_64(Value: Size)) ||
2550 getTargetHooks().isScalarizableAsmOperand(CGF&: *this, Ty)) {
2551 Ty = llvm::IntegerType::get(C&: getLLVMContext(), NumBits: Size);
2552
2553 return {Builder.CreateLoad(Addr: InputValue.getAddress().withElementType(ElemTy: Ty)),
2554 nullptr};
2555 }
2556 }
2557
2558 Address Addr = InputValue.getAddress();
2559 ConstraintStr += '*';
2560 return {InputValue.getPointer(CGF&: *this), Addr.getElementType()};
2561}
2562
2563std::pair<llvm::Value *, llvm::Type *>
2564CodeGenFunction::EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
2565 const Expr *InputExpr,
2566 std::string &ConstraintStr) {
2567 // If this can't be a register or memory, i.e., has to be a constant
2568 // (immediate or symbolic), try to emit it as such.
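  // For example (illustrative, x86-style constraint): in
  //   asm("shll %1, %0" : "+r"(x) : "I"(3));
  // the "I" operand must be an immediate, so the 3 is emitted directly as a
  // ConstantInt rather than loaded into a register.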
2569 if (!Info.allowsRegister() && !Info.allowsMemory()) {
2570 if (Info.requiresImmediateConstant()) {
2571 Expr::EvalResult EVResult;
2572 InputExpr->EvaluateAsRValue(Result&: EVResult, Ctx: getContext(), InConstantContext: true);
2573
2574 llvm::APSInt IntResult;
2575 if (EVResult.Val.toIntegralConstant(Result&: IntResult, SrcTy: InputExpr->getType(),
2576 Ctx: getContext()))
2577 return {llvm::ConstantInt::get(Context&: getLLVMContext(), V: IntResult), nullptr};
2578 }
2579
2580 Expr::EvalResult Result;
2581 if (InputExpr->EvaluateAsInt(Result, Ctx: getContext()))
2582 return {llvm::ConstantInt::get(Context&: getLLVMContext(), V: Result.Val.getInt()),
2583 nullptr};
2584 }
2585
2586 if (Info.allowsRegister() || !Info.allowsMemory())
2587 if (CodeGenFunction::hasScalarEvaluationKind(T: InputExpr->getType()))
2588 return {EmitScalarExpr(E: InputExpr), nullptr};
2589 if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
2590 return {EmitScalarExpr(E: InputExpr), nullptr};
2591 InputExpr = InputExpr->IgnoreParenNoopCasts(Ctx: getContext());
2592 LValue Dest = EmitLValue(E: InputExpr);
2593 return EmitAsmInputLValue(Info, InputValue: Dest, InputType: InputExpr->getType(), ConstraintStr,
2594 Loc: InputExpr->getExprLoc());
2595}
2596
2597/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
2598/// asm call instruction. The !srcloc MDNode contains a list of constant
2599/// integers which are the source locations of the start of each line in the
2600/// asm.
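/// For instance (illustrative): for asm("movl %%eax, %%ebx\n\tcpuid" : ...),
/// the node holds two encoded locations, one for the start of each asm line,
/// so backend diagnostics can point at the offending line.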
2601static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
2602 CodeGenFunction &CGF) {
2603 SmallVector<llvm::Metadata *, 8> Locs;
2604 // Add the location of the first line to the MDNode.
2605 Locs.push_back(Elt: llvm::ConstantAsMetadata::get(C: llvm::ConstantInt::get(
2606 Ty: CGF.Int64Ty, V: Str->getBeginLoc().getRawEncoding())));
2607 StringRef StrVal = Str->getString();
2608 if (!StrVal.empty()) {
2609 const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
2610 const LangOptions &LangOpts = CGF.CGM.getLangOpts();
2611 unsigned StartToken = 0;
2612 unsigned ByteOffset = 0;
2613
2614 // Add the location of the start of each subsequent line of the asm to the
2615 // MDNode.
2616 for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
2617 if (StrVal[i] != '\n') continue;
2618 SourceLocation LineLoc = Str->getLocationOfByte(
2619 ByteNo: i + 1, SM, Features: LangOpts, Target: CGF.getTarget(), StartToken: &StartToken, StartTokenByteOffset: &ByteOffset);
2620 Locs.push_back(Elt: llvm::ConstantAsMetadata::get(
2621 C: llvm::ConstantInt::get(Ty: CGF.Int64Ty, V: LineLoc.getRawEncoding())));
2622 }
2623 }
2624
2625 return llvm::MDNode::get(Context&: CGF.getLLVMContext(), MDs: Locs);
2626}
2627
2628static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
2629 bool HasUnwindClobber, bool ReadOnly,
2630 bool ReadNone, bool NoMerge, bool NoConvergent,
2631 const AsmStmt &S,
2632 const std::vector<llvm::Type *> &ResultRegTypes,
2633 const std::vector<llvm::Type *> &ArgElemTypes,
2634 CodeGenFunction &CGF,
2635 std::vector<llvm::Value *> &RegResults) {
2636 if (!HasUnwindClobber)
2637 Result.addFnAttr(Kind: llvm::Attribute::NoUnwind);
2638
2639 if (NoMerge)
2640 Result.addFnAttr(Kind: llvm::Attribute::NoMerge);
2641 // Attach readnone and readonly attributes.
2642 if (!HasSideEffect) {
2643 if (ReadNone)
2644 Result.setDoesNotAccessMemory();
2645 else if (ReadOnly)
2646 Result.setOnlyReadsMemory();
2647 }
2648
2649 // Add elementtype attribute for indirect constraints.
2650 for (auto Pair : llvm::enumerate(First: ArgElemTypes)) {
2651 if (Pair.value()) {
2652 auto Attr = llvm::Attribute::get(
2653 Context&: CGF.getLLVMContext(), Kind: llvm::Attribute::ElementType, Ty: Pair.value());
2654 Result.addParamAttr(ArgNo: Pair.index(), Attr);
2655 }
2656 }
2657
2658 // Slap the source location of the inline asm into a !srcloc metadata on the
2659 // call.
2660 const StringLiteral *SL;
2661 if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(Val: &S);
2662 gccAsmStmt &&
2663 (SL = dyn_cast<StringLiteral>(Val: gccAsmStmt->getAsmStringExpr()))) {
2664 Result.setMetadata(Kind: "srcloc", Node: getAsmSrcLocInfo(Str: SL, CGF));
2665 } else {
2666 // At least put the line number on MS inline asm blobs and GCC asm constexpr
2667 // strings.
2668 llvm::Constant *Loc =
2669 llvm::ConstantInt::get(Ty: CGF.Int64Ty, V: S.getAsmLoc().getRawEncoding());
2670 Result.setMetadata(Kind: "srcloc",
2671 Node: llvm::MDNode::get(Context&: CGF.getLLVMContext(),
2672 MDs: llvm::ConstantAsMetadata::get(C: Loc)));
2673 }
2674
2675 if (!NoConvergent && CGF.getLangOpts().assumeFunctionsAreConvergent())
2676 // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
2677 // convergent (meaning, they may call an intrinsically convergent op, such
2678 // as bar.sync, and so can't have certain optimizations applied around
2679 // them) unless it's explicitly marked 'noconvergent'.
2680 Result.addFnAttr(Kind: llvm::Attribute::Convergent);
2681 // Extract all of the register value results from the asm.
2682 if (ResultRegTypes.size() == 1) {
2683 RegResults.push_back(x: &Result);
2684 } else {
2685 for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
2686 llvm::Value *Tmp = CGF.Builder.CreateExtractValue(Agg: &Result, Idxs: i, Name: "asmresult");
2687 RegResults.push_back(x: Tmp);
2688 }
2689 }
2690}
2691
2692static void
2693EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S,
2694 const llvm::ArrayRef<llvm::Value *> RegResults,
2695 const llvm::ArrayRef<llvm::Type *> ResultRegTypes,
2696 const llvm::ArrayRef<llvm::Type *> ResultTruncRegTypes,
2697 const llvm::ArrayRef<LValue> ResultRegDests,
2698 const llvm::ArrayRef<QualType> ResultRegQualTys,
2699 const llvm::BitVector &ResultTypeRequiresCast,
2700 const llvm::BitVector &ResultRegIsFlagReg) {
2701 CGBuilderTy &Builder = CGF.Builder;
2702 CodeGenModule &CGM = CGF.CGM;
2703 llvm::LLVMContext &CTX = CGF.getLLVMContext();
2704
2705 assert(RegResults.size() == ResultRegTypes.size());
2706 assert(RegResults.size() == ResultTruncRegTypes.size());
2707 assert(RegResults.size() == ResultRegDests.size());
2708 // ResultRegDests can also be populated by addReturnRegisterOutputs() above,
2709 // in which case its size may grow.
2710 assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
2711 assert(ResultRegIsFlagReg.size() <= ResultRegDests.size());
2712
2713 for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2714 llvm::Value *Tmp = RegResults[i];
2715 llvm::Type *TruncTy = ResultTruncRegTypes[i];
2716
2717 if ((i < ResultRegIsFlagReg.size()) && ResultRegIsFlagReg[i]) {
2718 // Target must guarantee the Value `Tmp` here is lowered to a boolean
2719 // value.
2720 llvm::Constant *Two = llvm::ConstantInt::get(Ty: Tmp->getType(), V: 2);
2721 llvm::Value *IsBooleanValue =
2722 Builder.CreateCmp(Pred: llvm::CmpInst::ICMP_ULT, LHS: Tmp, RHS: Two);
2723 llvm::Function *FnAssume = CGM.getIntrinsic(IID: llvm::Intrinsic::assume);
2724 Builder.CreateCall(Callee: FnAssume, Args: IsBooleanValue);
2725 }
2726
2727 // If the result type of the LLVM IR asm doesn't match the result type of
2728 // the expression, do the conversion.
2729 if (ResultRegTypes[i] != TruncTy) {
2730
2731 // Truncate the integer result to the right size; note that TruncTy can
2732 // also be a pointer.
2733 if (TruncTy->isFloatingPointTy())
2734 Tmp = Builder.CreateFPTrunc(V: Tmp, DestTy: TruncTy);
2735 else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2736 uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(Ty: TruncTy);
2737 Tmp = Builder.CreateTrunc(
2738 V: Tmp, DestTy: llvm::IntegerType::get(C&: CTX, NumBits: (unsigned)ResSize));
2739 Tmp = Builder.CreateIntToPtr(V: Tmp, DestTy: TruncTy);
2740 } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2741 uint64_t TmpSize =
2742 CGM.getDataLayout().getTypeSizeInBits(Ty: Tmp->getType());
2743 Tmp = Builder.CreatePtrToInt(
2744 V: Tmp, DestTy: llvm::IntegerType::get(C&: CTX, NumBits: (unsigned)TmpSize));
2745 Tmp = Builder.CreateTrunc(V: Tmp, DestTy: TruncTy);
2746 } else if (Tmp->getType()->isIntegerTy() && TruncTy->isIntegerTy()) {
2747 Tmp = Builder.CreateZExtOrTrunc(V: Tmp, DestTy: TruncTy);
2748 } else if (Tmp->getType()->isVectorTy() || TruncTy->isVectorTy()) {
2749 Tmp = Builder.CreateBitCast(V: Tmp, DestTy: TruncTy);
2750 }
2751 }
2752
2753 LValue Dest = ResultRegDests[i];
2754 // ResultTypeRequiresCast elements correspond to the first
2755 // ResultTypeRequiresCast.size() elements of RegResults.
2756 if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
2757 unsigned Size = CGF.getContext().getTypeSize(T: ResultRegQualTys[i]);
2758 Address A = Dest.getAddress().withElementType(ElemTy: ResultRegTypes[i]);
2759 if (CGF.getTargetHooks().isScalarizableAsmOperand(CGF, Ty: TruncTy)) {
2760 Builder.CreateStore(Val: Tmp, Addr: A);
2761 continue;
2762 }
2763
2764 QualType Ty =
2765 CGF.getContext().getIntTypeForBitwidth(DestWidth: Size, /*Signed=*/false);
2766 if (Ty.isNull()) {
2767 const Expr *OutExpr = S.getOutputExpr(i);
2768 CGM.getDiags().Report(Loc: OutExpr->getExprLoc(),
2769 DiagID: diag::err_store_value_to_reg);
2770 return;
2771 }
2772 Dest = CGF.MakeAddrLValue(Addr: A, T: Ty);
2773 }
2774 CGF.EmitStoreThroughLValue(Src: RValue::get(V: Tmp), Dst: Dest);
2775 }
2776}
2777
2778static void EmitHipStdParUnsupportedAsm(CodeGenFunction *CGF,
2779 const AsmStmt &S) {
2780 constexpr auto Name = "__ASM__hipstdpar_unsupported";
2781
2782 std::string Asm;
2783 if (auto GCCAsm = dyn_cast<GCCAsmStmt>(Val: &S))
2784 Asm = GCCAsm->getAsmString();
2785
2786 auto &Ctx = CGF->CGM.getLLVMContext();
2787
2788 auto StrTy = llvm::ConstantDataArray::getString(Context&: Ctx, Initializer: Asm);
2789 auto FnTy = llvm::FunctionType::get(Result: llvm::Type::getVoidTy(C&: Ctx),
2790 Params: {StrTy->getType()}, isVarArg: false);
2791 auto UBF = CGF->CGM.getModule().getOrInsertFunction(Name, T: FnTy);
2792
2793 CGF->Builder.CreateCall(Callee: UBF, Args: {StrTy});
2794}
2795
2796void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
2797 // Pop all cleanup blocks at the end of the asm statement.
2798 CodeGenFunction::RunCleanupsScope Cleanups(*this);
2799
2800 // Assemble the final asm string.
2801 std::string AsmString = S.generateAsmString(C: getContext());
2802
2803 // Get all the output and input constraints together.
2804 SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
2805 SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
2806
2807 bool IsHipStdPar = getLangOpts().HIPStdPar && getLangOpts().CUDAIsDevice;
2808 bool IsValidTargetAsm = true;
2809 for (unsigned i = 0, e = S.getNumOutputs(); i != e && IsValidTargetAsm; i++) {
2810 StringRef Name;
2811 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(Val: &S))
2812 Name = GAS->getOutputName(i);
2813 TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
2814 bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
2815 if (IsHipStdPar && !IsValid)
2816 IsValidTargetAsm = false;
2817 else
2818 assert(IsValid && "Failed to parse output constraint");
2819 OutputConstraintInfos.push_back(Elt: Info);
2820 }
2821
2822 for (unsigned i = 0, e = S.getNumInputs(); i != e && IsValidTargetAsm; i++) {
2823 StringRef Name;
2824 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(Val: &S))
2825 Name = GAS->getInputName(i);
2826 TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
2827 bool IsValid =
2828 getTarget().validateInputConstraint(OutputConstraints: OutputConstraintInfos, info&: Info);
2829 if (IsHipStdPar && !IsValid)
2830 IsValidTargetAsm = false;
2831 else
2832 assert(IsValid && "Failed to parse input constraint");
2833 InputConstraintInfos.push_back(Elt: Info);
2834 }
2835
2836 if (!IsValidTargetAsm)
2837 return EmitHipStdParUnsupportedAsm(CGF: this, S);
2838
2839 std::string Constraints;
2840
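  // Bookkeeping for the inline asm call being built: the lvalues and types of
  // register outputs, the (possibly narrower) types results are truncated to
  // before being stored, the argument list for memory operands, and which
  // results need a cast or target condition-code flag registers.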
2841 std::vector<LValue> ResultRegDests;
2842 std::vector<QualType> ResultRegQualTys;
2843 std::vector<llvm::Type *> ResultRegTypes;
2844 std::vector<llvm::Type *> ResultTruncRegTypes;
2845 std::vector<llvm::Type *> ArgTypes;
2846 std::vector<llvm::Type *> ArgElemTypes;
2847 std::vector<llvm::Value*> Args;
2848 llvm::BitVector ResultTypeRequiresCast;
2849 llvm::BitVector ResultRegIsFlagReg;
2850
2851 // Keep track of inout constraints.
2852 std::string InOutConstraints;
2853 std::vector<llvm::Value*> InOutArgs;
2854 std::vector<llvm::Type*> InOutArgTypes;
2855 std::vector<llvm::Type*> InOutArgElemTypes;
2856
2857  // Keep track of output constraints for tied input operands.
2858 std::vector<std::string> OutputConstraints;
2859
2860 // Keep track of defined physregs.
2861 llvm::SmallSet<std::string, 8> PhysRegOutputs;
2862
2863  // An inline asm can be marked readonly if it meets the following conditions:
2864  //  - it doesn't have any side effects
2865  //  - it doesn't clobber memory
2866  //  - it doesn't return a value by-reference
2867  // It can be marked readnone if it doesn't have any input memory constraints
2868  // in addition to meeting the conditions listed above.
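  // For example, `asm("add %1, %0" : "=r"(x) : "r"(y))` qualifies for both,
  // while `asm volatile("" ::: "memory")` qualifies for neither, because it is
  // volatile and clobbers memory.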
2869 bool ReadOnly = true, ReadNone = true;
2870
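  // First pass over the outputs: build the constraint string, decide whether
  // each output is returned by-value in a register or written through a
  // pointer argument, and remember read-write operands so their input halves
  // can be appended after the real inputs below.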
2871 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2872 TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
2873
2874 // Simplify the output constraint.
2875 std::string OutputConstraint(S.getOutputConstraint(i));
2876 OutputConstraint = SimplifyConstraint(Constraint: OutputConstraint.c_str() + 1,
2877 Target: getTarget(), OutCons: &OutputConstraintInfos);
2878
2879 const Expr *OutExpr = S.getOutputExpr(i);
2880 OutExpr = OutExpr->IgnoreParenNoopCasts(Ctx: getContext());
2881
2882 std::string GCCReg;
2883 OutputConstraint = AddVariableConstraints(Constraint: OutputConstraint, AsmExpr: *OutExpr,
2884 Target: getTarget(), CGM, Stmt: S,
2885 EarlyClobber: Info.earlyClobber(),
2886 GCCReg: &GCCReg);
2887    // Give an error on multiple outputs to the same physreg.
2888 if (!GCCReg.empty() && !PhysRegOutputs.insert(V: GCCReg).second)
2889 CGM.Error(loc: S.getAsmLoc(), error: "multiple outputs to hard register: " + GCCReg);
2890
2891 OutputConstraints.push_back(x: OutputConstraint);
2892 LValue Dest = EmitLValue(E: OutExpr);
2893 if (!Constraints.empty())
2894 Constraints += ',';
2895
2896 // If this is a register output, then make the inline asm return it
2897 // by-value. If this is a memory result, return the value by-reference.
2898 QualType QTy = OutExpr->getType();
2899 const bool IsScalarOrAggregate = hasScalarEvaluationKind(T: QTy) ||
2900 hasAggregateEvaluationKind(T: QTy);
2901 if (!Info.allowsMemory() && IsScalarOrAggregate) {
2903 Constraints += "=" + OutputConstraint;
2904 ResultRegQualTys.push_back(x: QTy);
2905 ResultRegDests.push_back(x: Dest);
2906
2907 bool IsFlagReg = llvm::StringRef(OutputConstraint).starts_with(Prefix: "{@cc");
2908 ResultRegIsFlagReg.push_back(Val: IsFlagReg);
2909
2910 llvm::Type *Ty = ConvertTypeForMem(T: QTy);
2911 const bool RequiresCast = Info.allowsRegister() &&
2912 (getTargetHooks().isScalarizableAsmOperand(CGF&: *this, Ty) ||
2913 Ty->isAggregateType());
2914
2915 ResultTruncRegTypes.push_back(x: Ty);
2916 ResultTypeRequiresCast.push_back(Val: RequiresCast);
2917
2918 if (RequiresCast) {
2919 unsigned Size = getContext().getTypeSize(T: QTy);
2920 if (Size)
2921 Ty = llvm::IntegerType::get(C&: getLLVMContext(), NumBits: Size);
2922 else
2923 CGM.Error(loc: OutExpr->getExprLoc(), error: "output size should not be zero");
2924 }
2925 ResultRegTypes.push_back(x: Ty);
2926 // If this output is tied to an input, and if the input is larger, then
2927 // we need to set the actual result type of the inline asm node to be the
2928 // same as the input type.
2929 if (Info.hasMatchingInput()) {
2930 unsigned InputNo;
2931 for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
2932 TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
2933 if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
2934 break;
2935 }
2936 assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
2937
2938 QualType InputTy = S.getInputExpr(i: InputNo)->getType();
2939 QualType OutputType = OutExpr->getType();
2940
2941 uint64_t InputSize = getContext().getTypeSize(T: InputTy);
2942 if (getContext().getTypeSize(T: OutputType) < InputSize) {
2943 // Form the asm to return the value as a larger integer or fp type.
2944 ResultRegTypes.back() = ConvertType(T: InputTy);
2945 }
2946 }
2947 if (llvm::Type* AdjTy =
2948 getTargetHooks().adjustInlineAsmType(CGF&: *this, Constraint: OutputConstraint,
2949 Ty: ResultRegTypes.back()))
2950 ResultRegTypes.back() = AdjTy;
2951 else {
2952 CGM.getDiags().Report(Loc: S.getAsmLoc(),
2953 DiagID: diag::err_asm_invalid_type_in_input)
2954 << OutExpr->getType() << OutputConstraint;
2955 }
2956
2957 // Update largest vector width for any vector types.
2958 if (auto *VT = dyn_cast<llvm::VectorType>(Val: ResultRegTypes.back()))
2959 LargestVectorWidth =
2960 std::max(a: (uint64_t)LargestVectorWidth,
2961 b: VT->getPrimitiveSizeInBits().getKnownMinValue());
2962 } else {
2963 Address DestAddr = Dest.getAddress();
2964      // Matrix types in memory are represented by arrays, but accessed through
2965      // vector pointers, with the alignment specified on the access operation.
2966      // For inline assembly, update pointer arguments to use vector pointers.
2967      // Otherwise there will be a mismatch if the matrix is also an input
2968      // argument, which is represented as a vector.
2969 if (isa<MatrixType>(Val: OutExpr->getType().getCanonicalType()))
2970 DestAddr = DestAddr.withElementType(ElemTy: ConvertType(T: OutExpr->getType()));
2971
2972 ArgTypes.push_back(x: DestAddr.getType());
2973 ArgElemTypes.push_back(x: DestAddr.getElementType());
2974 Args.push_back(x: DestAddr.emitRawPointer(CGF&: *this));
2975 Constraints += "=*";
2976 Constraints += OutputConstraint;
2977 ReadOnly = ReadNone = false;
2978 }
2979
2980 if (Info.isReadWrite()) {
2981 InOutConstraints += ',';
2982
2983 const Expr *InputExpr = S.getOutputExpr(i);
2984 llvm::Value *Arg;
2985 llvm::Type *ArgElemType;
2986 std::tie(args&: Arg, args&: ArgElemType) = EmitAsmInputLValue(
2987 Info, InputValue: Dest, InputType: InputExpr->getType(), ConstraintStr&: InOutConstraints,
2988 Loc: InputExpr->getExprLoc());
2989
2990 if (llvm::Type* AdjTy =
2991 getTargetHooks().adjustInlineAsmType(CGF&: *this, Constraint: OutputConstraint,
2992 Ty: Arg->getType()))
2993 Arg = Builder.CreateBitCast(V: Arg, DestTy: AdjTy);
2994
2995 // Update largest vector width for any vector types.
2996 if (auto *VT = dyn_cast<llvm::VectorType>(Val: Arg->getType()))
2997 LargestVectorWidth =
2998 std::max(a: (uint64_t)LargestVectorWidth,
2999 b: VT->getPrimitiveSizeInBits().getKnownMinValue());
3000 // Only tie earlyclobber physregs.
3001 if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber()))
3002 InOutConstraints += llvm::utostr(X: i);
3003 else
3004 InOutConstraints += OutputConstraint;
3005
3006 InOutArgTypes.push_back(x: Arg->getType());
3007 InOutArgElemTypes.push_back(x: ArgElemType);
3008 InOutArgs.push_back(x: Arg);
3009 }
3010 }
3011
3012 // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
3013 // to the return value slot. Only do this when returning in registers.
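  // For example, a function returning int whose body ends in
  // `__asm { mov eax, 42 }` returns 42: the target hook below appends output
  // constraints for the return registers so their values get stored into the
  // return value slot.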
3014 if (isa<MSAsmStmt>(Val: &S)) {
3015 const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
3016 if (RetAI.isDirect() || RetAI.isExtend()) {
3017 // Make a fake lvalue for the return value slot.
3018 LValue ReturnSlot = MakeAddrLValueWithoutTBAA(Addr: ReturnValue, T: FnRetTy);
3019 CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
3020 CGF&: *this, ReturnValue: ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
3021 ResultRegDests, AsmString, NumOutputs: S.getNumOutputs());
3022 SawAsmBlock = true;
3023 }
3024 }
3025
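  // Second pass: the inputs. Simplify and record each constraint, widen any
  // input that is tied to a larger output, and collect the argument values
  // and their types.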
3026 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
3027 const Expr *InputExpr = S.getInputExpr(i);
3028
3029 TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
3030
3031 if (Info.allowsMemory())
3032 ReadNone = false;
3033
3034 if (!Constraints.empty())
3035 Constraints += ',';
3036
3037 // Simplify the input constraint.
3038 std::string InputConstraint(S.getInputConstraint(i));
3039 InputConstraint = SimplifyConstraint(Constraint: InputConstraint.c_str(), Target: getTarget(),
3040 OutCons: &OutputConstraintInfos);
3041
3042 InputConstraint = AddVariableConstraints(
3043 Constraint: InputConstraint, AsmExpr: *InputExpr->IgnoreParenNoopCasts(Ctx: getContext()),
3044 Target: getTarget(), CGM, Stmt: S, EarlyClobber: false /* No EarlyClobber */);
3045
3046    std::string ReplaceConstraint(InputConstraint);
3047 llvm::Value *Arg;
3048 llvm::Type *ArgElemType;
3049 std::tie(args&: Arg, args&: ArgElemType) = EmitAsmInput(Info, InputExpr, ConstraintStr&: Constraints);
3050
3051 // If this input argument is tied to a larger output result, extend the
3052 // input to be the same size as the output. The LLVM backend wants to see
3053 // the input and output of a matching constraint be the same size. Note
3054 // that GCC does not define what the top bits are here. We use zext because
3055 // that is usually cheaper, but LLVM IR should really get an anyext someday.
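    // For example, if a 32-bit input is tied to a 64-bit output, the input is
    // zero-extended (or fpext'ed for floating-point types) to 64 bits here.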
3056 if (Info.hasTiedOperand()) {
3057 unsigned Output = Info.getTiedOperand();
3058 QualType OutputType = S.getOutputExpr(i: Output)->getType();
3059 QualType InputTy = InputExpr->getType();
3060
3061 if (getContext().getTypeSize(T: OutputType) >
3062 getContext().getTypeSize(T: InputTy)) {
3063 // Use ptrtoint as appropriate so that we can do our extension.
3064 if (isa<llvm::PointerType>(Val: Arg->getType()))
3065 Arg = Builder.CreatePtrToInt(V: Arg, DestTy: IntPtrTy);
3066 llvm::Type *OutputTy = ConvertType(T: OutputType);
3067 if (isa<llvm::IntegerType>(Val: OutputTy))
3068 Arg = Builder.CreateZExt(V: Arg, DestTy: OutputTy);
3069 else if (isa<llvm::PointerType>(Val: OutputTy))
3070 Arg = Builder.CreateZExt(V: Arg, DestTy: IntPtrTy);
3071 else if (OutputTy->isFloatingPointTy())
3072 Arg = Builder.CreateFPExt(V: Arg, DestTy: OutputTy);
3073 }
3074 // Deal with the tied operands' constraint code in adjustInlineAsmType.
3075 ReplaceConstraint = OutputConstraints[Output];
3076 }
3077 if (llvm::Type* AdjTy =
3078 getTargetHooks().adjustInlineAsmType(CGF&: *this, Constraint: ReplaceConstraint,
3079 Ty: Arg->getType()))
3080 Arg = Builder.CreateBitCast(V: Arg, DestTy: AdjTy);
3081 else
3082 CGM.getDiags().Report(Loc: S.getAsmLoc(), DiagID: diag::err_asm_invalid_type_in_input)
3083 << InputExpr->getType() << InputConstraint;
3084
3085 // Update largest vector width for any vector types.
3086 if (auto *VT = dyn_cast<llvm::VectorType>(Val: Arg->getType()))
3087 LargestVectorWidth =
3088 std::max(a: (uint64_t)LargestVectorWidth,
3089 b: VT->getPrimitiveSizeInBits().getKnownMinValue());
3090
3091 ArgTypes.push_back(x: Arg->getType());
3092 ArgElemTypes.push_back(x: ArgElemType);
3093 Args.push_back(x: Arg);
3094 Constraints += InputConstraint;
3095 }
3096
3097 // Append the "input" part of inout constraints.
3098 for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
3099 ArgTypes.push_back(x: InOutArgTypes[i]);
3100 ArgElemTypes.push_back(x: InOutArgElemTypes[i]);
3101 Args.push_back(x: InOutArgs[i]);
3102 }
3103 Constraints += InOutConstraints;
3104
3105  // Labels (targets of GCC-style asm goto).
3106 SmallVector<llvm::BasicBlock *, 16> Transfer;
3107 llvm::BasicBlock *Fallthrough = nullptr;
3108 bool IsGCCAsmGoto = false;
3109 if (const auto *GS = dyn_cast<GCCAsmStmt>(Val: &S)) {
3110 IsGCCAsmGoto = GS->isAsmGoto();
3111 if (IsGCCAsmGoto) {
3112 for (const auto *E : GS->labels()) {
3113 JumpDest Dest = getJumpDestForLabel(D: E->getLabel());
3114 Transfer.push_back(Elt: Dest.getBlock());
3115 if (!Constraints.empty())
3116 Constraints += ',';
3117 Constraints += "!i";
3118 }
3119 Fallthrough = createBasicBlock(name: "asm.fallthrough");
3120 }
3121 }
3122
3123 bool HasUnwindClobber = false;
3124
3125 // Clobbers
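  // Each clobber becomes a "~{name}" constraint. "memory" additionally drops
  // the readonly/readnone markings, "unwind" is recorded separately rather
  // than appended, and for MS-style blobs a clobbered return register is
  // folded into an early-clobber output when a matching output exists.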
3126 for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
3127 std::string Clobber = S.getClobber(i);
3128
3129 if (Clobber == "memory")
3130 ReadOnly = ReadNone = false;
3131 else if (Clobber == "unwind") {
3132 HasUnwindClobber = true;
3133 continue;
3134 } else if (Clobber != "cc") {
3135 Clobber = getTarget().getNormalizedGCCRegisterName(Name: Clobber);
3136 if (CGM.getCodeGenOpts().StackClashProtector &&
3137 getTarget().isSPRegName(Clobber)) {
3138 CGM.getDiags().Report(Loc: S.getAsmLoc(),
3139 DiagID: diag::warn_stack_clash_protection_inline_asm);
3140 }
3141 }
3142
3143 if (isa<MSAsmStmt>(Val: &S)) {
3144 if (Clobber == "eax" || Clobber == "edx") {
3145 if (Constraints.find(s: "=&A") != std::string::npos)
3146 continue;
3147 std::string::size_type position1 =
3148 Constraints.find(str: "={" + Clobber + "}");
3149 if (position1 != std::string::npos) {
3150 Constraints.insert(pos: position1 + 1, s: "&");
3151 continue;
3152 }
3153 std::string::size_type position2 = Constraints.find(s: "=A");
3154 if (position2 != std::string::npos) {
3155 Constraints.insert(pos: position2 + 1, s: "&");
3156 continue;
3157 }
3158 }
3159 }
3160 if (!Constraints.empty())
3161 Constraints += ',';
3162
3163 Constraints += "~{";
3164 Constraints += Clobber;
3165 Constraints += '}';
3166 }
3167
3168 assert(!(HasUnwindClobber && IsGCCAsmGoto) &&
3169 "unwind clobber can't be used with asm goto");
3170
3171  // Add machine-specific clobbers.
3172 std::string_view MachineClobbers = getTarget().getClobbers();
3173 if (!MachineClobbers.empty()) {
3174 if (!Constraints.empty())
3175 Constraints += ',';
3176 Constraints += MachineClobbers;
3177 }
3178
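  // The asm returns void if there are no register outputs, the single output
  // type if there is exactly one, and a struct of all output types otherwise.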
3179 llvm::Type *ResultType;
3180 if (ResultRegTypes.empty())
3181 ResultType = VoidTy;
3182 else if (ResultRegTypes.size() == 1)
3183 ResultType = ResultRegTypes[0];
3184 else
3185 ResultType = llvm::StructType::get(Context&: getLLVMContext(), Elements: ResultRegTypes);
3186
3187 llvm::FunctionType *FTy =
3188 llvm::FunctionType::get(Result: ResultType, Params: ArgTypes, isVarArg: false);
3189
3190 bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
3191
3192 llvm::InlineAsm::AsmDialect GnuAsmDialect =
3193 CGM.getCodeGenOpts().getInlineAsmDialect() == CodeGenOptions::IAD_ATT
3194 ? llvm::InlineAsm::AD_ATT
3195 : llvm::InlineAsm::AD_Intel;
3196 llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(Val: &S) ?
3197 llvm::InlineAsm::AD_Intel : GnuAsmDialect;
3198
3199 llvm::InlineAsm *IA = llvm::InlineAsm::get(
3200 Ty: FTy, AsmString, Constraints, hasSideEffects: HasSideEffect,
3201 /* IsAlignStack */ isAlignStack: false, asmDialect: AsmDialect, canThrow: HasUnwindClobber);
3202 std::vector<llvm::Value*> RegResults;
3203 llvm::CallBrInst *CBR;
3204 llvm::DenseMap<llvm::BasicBlock *, SmallVector<llvm::Value *, 4>>
3205 CBRRegResults;
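  // Emit the asm itself: as a callbr for asm goto, via EmitCallOrInvoke when
  // it has an unwind clobber (so it may throw), and as a plain call otherwise.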
3206 if (IsGCCAsmGoto) {
3207 CBR = Builder.CreateCallBr(Callee: IA, DefaultDest: Fallthrough, IndirectDests: Transfer, Args);
3208 EmitBlock(BB: Fallthrough);
3209 UpdateAsmCallInst(Result&: *CBR, HasSideEffect, /*HasUnwindClobber=*/false, ReadOnly,
3210 ReadNone, NoMerge: InNoMergeAttributedStmt,
3211 NoConvergent: InNoConvergentAttributedStmt, S, ResultRegTypes,
3212 ArgElemTypes, CGF&: *this, RegResults);
3213 // Because we are emitting code top to bottom, we don't have enough
3214 // information at this point to know precisely whether we have a critical
3215 // edge. If we have outputs, split all indirect destinations.
3216 if (!RegResults.empty()) {
3217 unsigned i = 0;
3218 for (llvm::BasicBlock *Dest : CBR->getIndirectDests()) {
3219 llvm::Twine SynthName = Dest->getName() + ".split";
3220 llvm::BasicBlock *SynthBB = createBasicBlock(name: SynthName);
3221 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
3222 Builder.SetInsertPoint(SynthBB);
3223
3224 if (ResultRegTypes.size() == 1) {
3225 CBRRegResults[SynthBB].push_back(Elt: CBR);
3226 } else {
3227 for (unsigned j = 0, e = ResultRegTypes.size(); j != e; ++j) {
3228 llvm::Value *Tmp = Builder.CreateExtractValue(Agg: CBR, Idxs: j, Name: "asmresult");
3229 CBRRegResults[SynthBB].push_back(Elt: Tmp);
3230 }
3231 }
3232
3233 EmitBranch(Target: Dest);
3234 EmitBlock(BB: SynthBB);
3235 CBR->setIndirectDest(i: i++, B: SynthBB);
3236 }
3237 }
3238 } else if (HasUnwindClobber) {
3239 llvm::CallBase *Result = EmitCallOrInvoke(Callee: IA, Args, Name: "");
3240 UpdateAsmCallInst(Result&: *Result, HasSideEffect, /*HasUnwindClobber=*/true,
3241 ReadOnly, ReadNone, NoMerge: InNoMergeAttributedStmt,
3242 NoConvergent: InNoConvergentAttributedStmt, S, ResultRegTypes,
3243 ArgElemTypes, CGF&: *this, RegResults);
3244 } else {
3245 llvm::CallInst *Result =
3246 Builder.CreateCall(Callee: IA, Args, OpBundles: getBundlesForFunclet(Callee: IA));
3247 UpdateAsmCallInst(Result&: *Result, HasSideEffect, /*HasUnwindClobber=*/false,
3248 ReadOnly, ReadNone, NoMerge: InNoMergeAttributedStmt,
3249 NoConvergent: InNoConvergentAttributedStmt, S, ResultRegTypes,
3250 ArgElemTypes, CGF&: *this, RegResults);
3251 }
3252
3253 EmitAsmStores(CGF&: *this, S, RegResults, ResultRegTypes, ResultTruncRegTypes,
3254 ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast,
3255 ResultRegIsFlagReg);
3256
3257 // If this is an asm goto with outputs, repeat EmitAsmStores, but with a
3258 // different insertion point; one for each indirect destination and with
3259 // CBRRegResults rather than RegResults.
3260 if (IsGCCAsmGoto && !CBRRegResults.empty()) {
3261 for (llvm::BasicBlock *Succ : CBR->getIndirectDests()) {
3262 llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
3263 Builder.SetInsertPoint(TheBB: Succ, IP: --(Succ->end()));
3264 EmitAsmStores(CGF&: *this, S, RegResults: CBRRegResults[Succ], ResultRegTypes,
3265 ResultTruncRegTypes, ResultRegDests, ResultRegQualTys,
3266 ResultTypeRequiresCast, ResultRegIsFlagReg);
3267 }
3268 }
3269}
3270
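/// Create the temporary record holding the values captured by a CapturedStmt
/// and emit the initializer for each captured field, including the bounds of
/// any captured variable-length array types.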
3271LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
3272 const RecordDecl *RD = S.getCapturedRecordDecl();
3273 QualType RecordTy = getContext().getRecordType(Decl: RD);
3274
3275 // Initialize the captured struct.
3276 LValue SlotLV =
3277 MakeAddrLValue(Addr: CreateMemTemp(T: RecordTy, Name: "agg.captured"), T: RecordTy);
3278
3279 RecordDecl::field_iterator CurField = RD->field_begin();
3280 for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
3281 E = S.capture_init_end();
3282 I != E; ++I, ++CurField) {
3283 LValue LV = EmitLValueForFieldInitialization(Base: SlotLV, Field: *CurField);
3284 if (CurField->hasCapturedVLAType()) {
3285 EmitLambdaVLACapture(VAT: CurField->getCapturedVLAType(), LV);
3286 } else {
3287 EmitInitializerForField(Field: *CurField, LHS: LV, Init: *I);
3288 }
3289 }
3290
3291 return SlotLV;
3292}
3293
3294/// Generate an outlined function for the body of a CapturedStmt, store any
3295/// captured variables into the captured struct, and call the outlined function.
3296llvm::Function *
3297CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
3298 LValue CapStruct = InitCapturedStruct(S);
3299
3300 // Emit the CapturedDecl
3301 CodeGenFunction CGF(CGM, true);
3302 CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
3303 llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
3304 delete CGF.CapturedStmtInfo;
3305
3306 // Emit call to the helper function.
3307 EmitCallOrInvoke(Callee: F, Args: CapStruct.getPointer(CGF&: *this));
3308
3309 return F;
3310}
3311
3312Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
3313 LValue CapStruct = InitCapturedStruct(S);
3314 return CapStruct.getAddress();
3315}
3316
3317/// Creates the outlined function for a CapturedStmt.
3318llvm::Function *
3319CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
3320 assert(CapturedStmtInfo &&
3321 "CapturedStmtInfo should be set when generating the captured function");
3322 const CapturedDecl *CD = S.getCapturedDecl();
3323 const RecordDecl *RD = S.getCapturedRecordDecl();
3324 SourceLocation Loc = S.getBeginLoc();
3325 assert(CD->hasBody() && "missing CapturedDecl body");
3326
3327 // Build the argument list.
3328 ASTContext &Ctx = CGM.getContext();
3329 FunctionArgList Args;
3330 Args.append(in_start: CD->param_begin(), in_end: CD->param_end());
3331
3332 // Create the function declaration.
3333 const CGFunctionInfo &FuncInfo =
3334 CGM.getTypes().arrangeBuiltinFunctionDeclaration(resultType: Ctx.VoidTy, args: Args);
3335 llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(Info: FuncInfo);
3336
3337 llvm::Function *F =
3338 llvm::Function::Create(Ty: FuncLLVMTy, Linkage: llvm::GlobalValue::InternalLinkage,
3339 N: CapturedStmtInfo->getHelperName(), M: &CGM.getModule());
3340 CGM.SetInternalFunctionAttributes(GD: CD, F, FI: FuncInfo);
3341 if (CD->isNothrow())
3342 F->addFnAttr(Kind: llvm::Attribute::NoUnwind);
3343
3344 // Generate the function.
3345 StartFunction(GD: CD, RetTy: Ctx.VoidTy, Fn: F, FnInfo: FuncInfo, Args, Loc: CD->getLocation(),
3346 StartLoc: CD->getBody()->getBeginLoc());
3347 // Set the context parameter in CapturedStmtInfo.
3348 Address DeclPtr = GetAddrOfLocalVar(VD: CD->getContextParam());
3349 CapturedStmtInfo->setContextValue(Builder.CreateLoad(Addr: DeclPtr));
3350
3351 // Initialize variable-length arrays.
3352 LValue Base = MakeNaturalAlignRawAddrLValue(
3353 V: CapturedStmtInfo->getContextValue(), T: Ctx.getTagDeclType(Decl: RD));
3354 for (auto *FD : RD->fields()) {
3355 if (FD->hasCapturedVLAType()) {
3356 auto *ExprArg =
3357 EmitLoadOfLValue(V: EmitLValueForField(Base, Field: FD), Loc: S.getBeginLoc())
3358 .getScalarVal();
3359 auto VAT = FD->getCapturedVLAType();
3360 VLASizeMap[VAT->getSizeExpr()] = ExprArg;
3361 }
3362 }
3363
3364 // If 'this' is captured, load it into CXXThisValue.
3365 if (CapturedStmtInfo->isCXXThisExprCaptured()) {
3366 FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
3367 LValue ThisLValue = EmitLValueForField(Base, Field: FD);
3368 CXXThisValue = EmitLoadOfLValue(V: ThisLValue, Loc).getScalarVal();
3369 }
3370
3371 PGO->assignRegionCounters(GD: GlobalDecl(CD), Fn: F);
3372 CapturedStmtInfo->EmitBody(CGF&: *this, S: CD->getBody());
3373 FinishFunction(EndLoc: CD->getBodyRBrace());
3374
3375 return F;
3376}
3377
3378// Returns the first convergence entry/loop/anchor instruction found in |BB|,
3379// or nullptr otherwise.
3380static llvm::ConvergenceControlInst *getConvergenceToken(llvm::BasicBlock *BB) {
3381 for (auto &I : *BB) {
3382 if (auto *CI = dyn_cast<llvm::ConvergenceControlInst>(Val: &I))
3383 return CI;
3384 }
3385 return nullptr;
3386}
3387
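// Wrap |Input| in a "convergencectrl" operand bundle referring to the
// innermost token on ConvergenceTokenStack, replacing and erasing the
// original instruction.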
3388llvm::CallBase *
3389CodeGenFunction::addConvergenceControlToken(llvm::CallBase *Input) {
3390 llvm::ConvergenceControlInst *ParentToken = ConvergenceTokenStack.back();
3391 assert(ParentToken);
3392
3393 llvm::Value *bundleArgs[] = {ParentToken};
3394 llvm::OperandBundleDef OB("convergencectrl", bundleArgs);
3395 auto *Output = llvm::CallBase::addOperandBundle(
3396 CB: Input, ID: llvm::LLVMContext::OB_convergencectrl, OB, InsertPt: Input->getIterator());
3397 Input->replaceAllUsesWith(V: Output);
3398 Input->eraseFromParent();
3399 return Output;
3400}
3401
3402llvm::ConvergenceControlInst *
3403CodeGenFunction::emitConvergenceLoopToken(llvm::BasicBlock *BB) {
3404 llvm::ConvergenceControlInst *ParentToken = ConvergenceTokenStack.back();
3405 assert(ParentToken);
3406 return llvm::ConvergenceControlInst::CreateLoop(BB&: *BB, Parent: ParentToken);
3407}
3408
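// Emit a loop convergence token in |BB|, anchored to the innermost token on
// ConvergenceTokenStack.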
3409llvm::ConvergenceControlInst *
3410CodeGenFunction::getOrEmitConvergenceEntryToken(llvm::Function *F) {
3411 llvm::BasicBlock *BB = &F->getEntryBlock();
3412 llvm::ConvergenceControlInst *Token = getConvergenceToken(BB);
3413 if (Token)
3414 return Token;
3415
3416 // Adding a convergence token requires the function to be marked as
3417 // convergent.
3418 F->setConvergent();
3419 return llvm::ConvergenceControlInst::CreateEntry(BB&: *BB);
3420}
3421