//===- ABIInfoImpl.cpp ----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ABIInfoImpl.h"

using namespace clang;
using namespace clang::CodeGen;

// Pin the vtable to this file.
DefaultABIInfo::~DefaultABIInfo() = default;

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

    return getNaturalAlignIndirect(Ty);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  ASTContext &Context = getContext();
  if (const auto *EIT = Ty->getAs<BitIntType>())
    if (EIT->getNumBits() >
        Context.getTypeSize(Context.getTargetInfo().hasInt128Type()
                                ? Context.Int128Ty
                                : Context.LongLongTy))
      return getNaturalAlignIndirect(Ty);

  return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                            : ABIArgInfo::getDirect());
}

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return getNaturalAlignIndirect(RetTy);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  if (const auto *EIT = RetTy->getAs<BitIntType>())
    if (EIT->getNumBits() >
        getContext().getTypeSize(getContext().getTargetInfo().hasInt128Type()
                                     ? getContext().Int128Ty
                                     : getContext().LongLongTy))
      return getNaturalAlignIndirect(RetTy);

  return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                               : ABIArgInfo::getDirect());
}

void DefaultABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);
}

RValue DefaultABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                 QualType Ty, AggValueSlot Slot) const {
  return CGF.EmitLoadOfAnyValue(
      CGF.MakeAddrLValue(
          EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty)), Ty),
      Slot);
}

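// Coerce a type so it is passed directly as an array of alignment-width
// integers large enough to cover its size.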
ABIArgInfo CodeGen::coerceToIntArray(QualType Ty, ASTContext &Context,
                                     llvm::LLVMContext &LLVMContext) {
  // Alignment and Size are measured in bits.
  const uint64_t Size = Context.getTypeSize(Ty);
  const uint64_t Alignment = Context.getTypeAlign(Ty);
  llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment);
  const uint64_t NumElements = (Size + Alignment - 1) / Alignment;
  return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
}

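// Store Value into the byte cells of Array at indices
// [FirstIndex, LastIndex], using byte-aligned stores.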
void CodeGen::AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                                 llvm::Value *Array, llvm::Value *Value,
                                 unsigned FirstIndex, unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell =
        Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
    Builder.CreateAlignedStore(Value, Cell, CharUnits::One());
  }
}

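// Types that are not evaluated as scalars, as well as member function
// pointers, are treated as aggregates for ABI purposes.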
bool CodeGen::isAggregateTypeForABI(QualType T) {
  return !CodeGenFunction::hasScalarEvaluationKind(T) ||
         T->isMemberFunctionPointerType();
}

llvm::Type *CodeGen::getVAListElementType(CodeGenFunction &CGF) {
  return CGF.ConvertTypeForMem(
      CGF.getContext().getBuiltinVaListType()->getPointeeType());
}

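// Determine how a record argument is passed: C++ records defer to the C++
// ABI; other records are passed directly unless they cannot be passed in
// registers, in which case they are passed indirectly.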
CGCXXABI::RecordArgABI CodeGen::getRecordArgABI(const RecordType *RT,
                                                CGCXXABI &CXXABI) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD) {
    if (!RT->getDecl()->canPassInRegisters())
      return CGCXXABI::RAA_Indirect;
    return CGCXXABI::RAA_Default;
  }
  return CXXABI.getRecordArgABI(RD);
}

CGCXXABI::RecordArgABI CodeGen::getRecordArgABI(QualType T, CGCXXABI &CXXABI) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return CGCXXABI::RAA_Default;
  return getRecordArgABI(RT, CXXABI);
}

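// Classify the return type: non-C++ records that cannot be returned in
// registers are returned indirectly; everything else is deferred to the C++
// ABI. Returns true if the return info was set.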
bool CodeGen::classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI,
                                 const ABIInfo &Info) {
  QualType Ty = FI.getReturnType();

  if (const auto *RT = Ty->getAs<RecordType>())
    if (!isa<CXXRecordDecl>(RT->getDecl()) &&
        !RT->getDecl()->canPassInRegisters()) {
      FI.getReturnInfo() = Info.getNaturalAlignIndirect(Ty);
      return true;
    }

  return CXXABI.classifyReturnType(FI);
}

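// If Ty is a transparent union, return the type of its first field;
// otherwise return Ty unchanged.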
QualType CodeGen::useFirstFieldIfTransparentUnion(QualType Ty) {
  if (const RecordType *UT = Ty->getAsUnionType()) {
    const RecordDecl *UD = UT->getDecl();
    if (UD->hasAttr<TransparentUnionAttr>()) {
      assert(!UD->field_empty() && "sema created an empty transparent union");
      return UD->field_begin()->getType();
    }
  }
  return Ty;
}

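// Round Ptr up to a multiple of Align by advancing it Align - 1 bytes and
// masking off the low bits with llvm.ptrmask.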
llvm::Value *CodeGen::emitRoundPointerUpToAlignment(CodeGenFunction &CGF,
                                                    llvm::Value *Ptr,
                                                    CharUnits Align) {
  // OverflowArgArea = (OverflowArgArea + Align - 1) & -Align;
  llvm::Value *RoundUp = CGF.Builder.CreateConstInBoundsGEP1_32(
      CGF.Builder.getInt8Ty(), Ptr, Align.getQuantity() - 1);
  return CGF.Builder.CreateIntrinsic(
      llvm::Intrinsic::ptrmask, {Ptr->getType(), CGF.IntPtrTy},
      {RoundUp, llvm::ConstantInt::get(CGF.IntPtrTy, -Align.getQuantity())},
      nullptr, Ptr->getName() + ".aligned");
}

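// Load the current argument pointer from a void*-style va_list, align it if
// required, advance the va_list past the slot, and return the argument's
// address.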
Address
CodeGen::emitVoidPtrDirectVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                llvm::Type *DirectTy, CharUnits DirectSize,
                                CharUnits DirectAlign, CharUnits SlotSize,
                                bool AllowHigherAlign, bool ForceRightAdjust) {
  // Cast the element type to i8* if necessary. Some platforms define
  // va_list as a struct containing an i8* instead of just an i8*.
  if (VAListAddr.getElementType() != CGF.Int8PtrTy)
    VAListAddr = VAListAddr.withElementType(CGF.Int8PtrTy);

  llvm::Value *Ptr = CGF.Builder.CreateLoad(VAListAddr, "argp.cur");

  // If the CC aligns values higher than the slot size, do so if needed.
  Address Addr = Address::invalid();
  if (AllowHigherAlign && DirectAlign > SlotSize) {
    Addr = Address(emitRoundPointerUpToAlignment(CGF, Ptr, DirectAlign),
                   CGF.Int8Ty, DirectAlign);
  } else {
    Addr = Address(Ptr, CGF.Int8Ty, SlotSize);
  }

  // Advance the pointer past the argument, then store that back.
  CharUnits FullDirectSize = DirectSize.alignTo(SlotSize);
  Address NextPtr =
      CGF.Builder.CreateConstInBoundsByteGEP(Addr, FullDirectSize, "argp.next");
  CGF.Builder.CreateStore(NextPtr.emitRawPointer(CGF), VAListAddr);

  // If the argument is smaller than a slot, and this is a big-endian
  // target, the argument will be right-adjusted in its slot.
  if (DirectSize < SlotSize && CGF.CGM.getDataLayout().isBigEndian() &&
      (!DirectTy->isStructTy() || ForceRightAdjust)) {
    Addr = CGF.Builder.CreateConstInBoundsByteGEP(Addr, SlotSize - DirectSize);
  }

  return Addr.withElementType(DirectTy);
}

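// Emit a va_arg load for a va_list that is a simple pointer into argument
// memory. When IsIndirect is set, the slot holds a pointer to the value
// rather than the value itself.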
RValue CodeGen::emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                 QualType ValueTy, bool IsIndirect,
                                 TypeInfoChars ValueInfo,
                                 CharUnits SlotSizeAndAlign,
                                 bool AllowHigherAlign, AggValueSlot Slot,
                                 bool ForceRightAdjust) {
  // The size and alignment of the value that was passed directly.
  CharUnits DirectSize, DirectAlign;
  if (IsIndirect) {
    DirectSize = CGF.getPointerSize();
    DirectAlign = CGF.getPointerAlign();
  } else {
    DirectSize = ValueInfo.Width;
    DirectAlign = ValueInfo.Align;
  }

  // Cast the address we've calculated to the right type.
  llvm::Type *DirectTy = CGF.ConvertTypeForMem(ValueTy), *ElementTy = DirectTy;
  if (IsIndirect) {
    unsigned AllocaAS = CGF.CGM.getDataLayout().getAllocaAddrSpace();
    DirectTy = llvm::PointerType::get(CGF.getLLVMContext(), AllocaAS);
  }

  Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy, DirectSize,
                                        DirectAlign, SlotSizeAndAlign,
                                        AllowHigherAlign, ForceRightAdjust);

  if (IsIndirect) {
    Addr = Address(CGF.Builder.CreateLoad(Addr), ElementTy, ValueInfo.Align);
  }

  return CGF.EmitLoadOfAnyValue(CGF.MakeAddrLValue(Addr, ValueTy), Slot);
}

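// Merge two addresses coming from different predecessor blocks with a PHI
// node, using the smaller of the two alignments.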
Address CodeGen::emitMergePHI(CodeGenFunction &CGF, Address Addr1,
                              llvm::BasicBlock *Block1, Address Addr2,
                              llvm::BasicBlock *Block2,
                              const llvm::Twine &Name) {
  assert(Addr1.getType() == Addr2.getType());
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name);
  PHI->addIncoming(Addr1.emitRawPointer(CGF), Block1);
  PHI->addIncoming(Addr2.emitRawPointer(CGF), Block2);
  CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment());
  return Address(PHI, Addr1.getElementType(), Align);
}

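// Return true if the field counts as empty for ABI purposes: an unnamed
// bit-field, or (subject to the C++ and [[no_unique_address]] rules
// documented below) a record, or array of records, with no non-empty members.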
bool CodeGen::isEmptyField(ASTContext &Context, const FieldDecl *FD,
                           bool AllowArrays, bool AsIfNoUniqueAddr) {
  if (FD->isUnnamedBitField())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  // Constant arrays of zero length always count as empty.
  bool WasArray = false;
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->isZeroSize())
        return true;
      FT = AT->getElementType();
      // The [[no_unique_address]] special case below does not apply to
      // arrays of C++ empty records, so we need to remember this fact.
      WasArray = true;
    }

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  //
  // The exception to the above rule are fields marked with the
  // [[no_unique_address]] attribute (since C++20). Those do count as empty
  // according to the Itanium ABI. The exception applies only to records,
  // not arrays of records, so we must also check whether we stripped off an
  // array type above.
  if (isa<CXXRecordDecl>(RT->getDecl()) &&
      (WasArray || (!AsIfNoUniqueAddr && !FD->hasAttr<NoUniqueAddressAttr>())))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays, AsIfNoUniqueAddr);
}

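// Return true if the record has no flexible array member and no non-empty
// bases or fields.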
bool CodeGen::isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays,
                            bool AsIfNoUniqueAddr) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isEmptyRecord(Context, I.getType(), true, AsIfNoUniqueAddr))
        return false;

  for (const auto *I : RD->fields())
    if (!isEmptyField(Context, I, AllowArrays, AsIfNoUniqueAddr))
      return false;
  return true;
}

bool CodeGen::isEmptyFieldForLayout(const ASTContext &Context,
                                    const FieldDecl *FD) {
  if (FD->isZeroLengthBitField(Context))
    return true;

  if (FD->isUnnamedBitField())
    return false;

  return isEmptyRecordForLayout(Context, FD->getType());
}

bool CodeGen::isEmptyRecordForLayout(const ASTContext &Context, QualType T) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;

  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    if (CXXRD->isDynamicClass())
      return false;

    for (const auto &I : CXXRD->bases())
      if (!isEmptyRecordForLayout(Context, I.getType()))
        return false;
  }

  for (const auto *I : RD->fields())
    if (!isEmptyFieldForLayout(Context, I))
      return false;

  return true;
}

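// If the struct effectively wraps a single non-aggregate element (ignoring
// empty bases, empty fields, and single-element arrays) and has no padding
// beyond that element, return the element's type; otherwise return null.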
const Type *CodeGen::isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return nullptr;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return nullptr;

  const Type *Found = nullptr;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CXXRD->bases()) {
      // Ignore empty records.
      if (isEmptyRecord(Context, I.getType(), true))
        continue;

      // If we already found an element then this isn't a single-element
      // struct.
      if (Found)
        return nullptr;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(I.getType(), Context);
      if (!Found)
        return nullptr;
    }
  }

  // Check for single element.
  for (const auto *FD : RD->fields()) {
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return nullptr;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getZExtSize() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!isAggregateTypeForABI(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return nullptr;
    }
  }

  // We don't consider a struct a single-element struct if it has
  // padding beyond the element type.
  if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
    return nullptr;

  return Found;
}

Address CodeGen::EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty, const ABIArgInfo &AI) {
  // This default implementation defers to the llvm backend's va_arg
  // instruction. It can handle only passing arguments directly
  // (typically only handled in the backend for primitive types), or
  // aggregates passed indirectly by pointer (NOTE: if the "byval"
  // flag has ABI impact in the callee, this implementation cannot
  // work.)

  // Only a few cases are covered here at the moment -- those needed
  // by the default ABI.
  llvm::Value *Val;

  if (AI.isIndirect()) {
    assert(!AI.getPaddingType() &&
           "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
    assert(
        !AI.getIndirectRealign() &&
        "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!");

    auto TyInfo = CGF.getContext().getTypeInfoInChars(Ty);
    CharUnits TyAlignForABI = TyInfo.Align;

    llvm::Type *ElementTy = CGF.ConvertTypeForMem(Ty);
    llvm::Type *BaseTy = llvm::PointerType::getUnqual(ElementTy);
    llvm::Value *Addr =
        CGF.Builder.CreateVAArg(VAListAddr.emitRawPointer(CGF), BaseTy);
    return Address(Addr, ElementTy, TyAlignForABI);
  } else {
    assert((AI.isDirect() || AI.isExtend()) &&
           "Unexpected ArgInfo Kind in generic VAArg emitter!");

    assert(!AI.getInReg() &&
           "Unexpected InReg seen in arginfo in generic VAArg emitter!");
    assert(!AI.getPaddingType() &&
           "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
    assert(!AI.getDirectOffset() &&
           "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!");
    assert(!AI.getCoerceToType() &&
           "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");

    Address Temp = CGF.CreateMemTemp(Ty, "varet");
    Val = CGF.Builder.CreateVAArg(VAListAddr.emitRawPointer(CGF),
                                  CGF.ConvertTypeForMem(Ty));
    CGF.Builder.CreateStore(Val, Temp);
    return Temp;
  }
}

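// Return true if Ty is a vector type whose total size is exactly 128 bits.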
bool CodeGen::isSIMDVectorType(ASTContext &Context, QualType Ty) {
  return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
}

bool CodeGen::isRecordWithSIMDVectorType(ASTContext &Context, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isRecordWithSIMDVectorType(Context, I.getType()))
        return false;

  for (const auto *i : RD->fields()) {
    QualType FT = i->getType();

    if (isSIMDVectorType(Context, FT))
      return true;

    if (isRecordWithSIMDVectorType(Context, FT))
      return true;
  }

  return false;
}