| File: | Source/JavaScriptCore/./b3/B3LowerMacros.cpp |
| Warning: | line 804, column 34: Local variable 'targetRTT' is uncounted and unsafe |
| 1 | /* |
| 2 | * Copyright (C) 2015-2020 Apple Inc. All rights reserved. |
| 3 | * |
| 4 | * Redistribution and use in source and binary forms, with or without |
| 5 | * modification, are permitted provided that the following conditions |
| 6 | * are met: |
| 7 | * 1. Redistributions of source code must retain the above copyright |
| 8 | * notice, this list of conditions and the following disclaimer. |
| 9 | * 2. Redistributions in binary form must reproduce the above copyright |
| 10 | * notice, this list of conditions and the following disclaimer in the |
| 11 | * documentation and/or other materials provided with the distribution. |
| 12 | * |
| 13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY |
| 14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
| 15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
| 16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR |
| 17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
| 18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
| 19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
| 20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
| 21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 24 | */ |
| 25 | |
| 26 | #include "config.h" |
| 27 | #include "B3LowerMacros.h" |
| 28 | |
| 29 | #if ENABLE(B3_JIT) |
| 30 | |
| 31 | #include "AllowMacroScratchRegisterUsage.h" |
| 32 | #include "B3AtomicValue.h" |
| 33 | #include "B3BasicBlockInlines.h" |
| 34 | #include "B3BlockInsertionSet.h" |
| 35 | #include "B3CCallValue.h" |
| 36 | #include "B3CaseCollectionInlines.h" |
| 37 | #include "B3CheckValue.h" |
| 38 | #include "B3ConstPtrValue.h" |
| 39 | #include "B3FenceValue.h" |
| 40 | #include "B3InsertionSetInlines.h" |
| 41 | #include "B3MemoryValueInlines.h" |
| 42 | #include "B3PatchpointValue.h" |
| 43 | #include "B3PhaseScope.h" |
| 44 | #include "B3StackmapGenerationParams.h" |
| 45 | #include "B3SwitchValue.h" |
| 46 | #include "B3UpsilonValue.h" |
| 47 | #include "B3UseCounts.h" |
| 48 | #include "B3ValueInlines.h" |
| 49 | #include "B3WasmRefCastValue.h" |
| 50 | #include "B3WasmRefTestValue.h" |
| 51 | #include "B3WasmStructGetValue.h" |
| 52 | #include "B3WasmStructNewValue.h" |
| 53 | #include "B3WasmStructSetValue.h" |
| 54 | #include "CCallHelpers.h" |
| 55 | #include "GPRInfo.h" |
| 56 | #include "JSCJSValueInlines.h" |
| 57 | #include "JSCell.h" |
| 58 | #include "JSObject.h" |
| 59 | #include "JSWebAssemblyStruct.h" |
| 60 | #include "LinkBuffer.h" |
| 61 | #include "MarkedSpace.h" |
| 62 | #include "WasmExceptionType.h" |
| 63 | #include "WasmFaultSignalHandler.h" |
| 64 | #include "WasmOperations.h" |
| 65 | #include "WasmThunks.h" |
| 66 | #include "WasmTypeDefinition.h" |
| 67 | #include "WebAssemblyFunctionBase.h" |
| 68 | #include "WebAssemblyGCStructure.h" |
| 69 | #include <cmath> |
| 70 | #include <numeric> |
| 71 | #include <wtf/BitVector.h> |
| 72 | |
| 73 | WTF_ALLOW_UNSAFE_BUFFER_USAGE_BEGIN |
| 74 | |
| 75 | namespace JSC { namespace B3 { |
| 76 | |
| 77 | namespace { |
| 78 | |
| 79 | class LowerMacros { |
| 80 | public: |
| 81 | LowerMacros(Procedure& proc) |
| 82 | : m_proc(proc) |
| 83 | , m_blockInsertionSet(proc) |
| 84 | , m_insertionSet(proc) |
| 85 | , m_useCounts(proc) |
| 86 | { |
| 87 | } |
| 88 | |
| 89 | bool run() |
| 90 | { |
| 91 | RELEASE_ASSERT(!m_proc.hasQuirks()); |
| 92 | |
| 93 | for (BasicBlock* block : m_proc) { |
| 94 | m_block = block; |
| 95 | processCurrentBlock(); |
| 96 | } |
| 97 | m_changed |= m_blockInsertionSet.execute(); |
| 98 | if (m_changed) { |
| 99 | m_proc.resetReachability(); |
| 100 | m_proc.invalidateCFG(); |
| 101 | } |
| 102 | |
| 103 | // This indicates that we've lowered the macros; the procedure now contains quirks. |
| 104 | m_proc.setHasQuirks(true); |
| 105 | |
| 106 | return m_changed; |
| 107 | } |
| 108 | |
| 109 | private: |
| 110 | template <class Fn> |
| 111 | void replaceWithBinaryCall(Fn &&function) |
| 112 | { |
| 113 | Value* functionAddress = m_insertionSet.insert<ConstPtrValue>(m_index, m_origin, tagCFunction<OperationPtrTag>(function)); |
| 114 | Value* result = m_insertionSet.insert<CCallValue>(m_index, m_value->type(), m_origin, Effects::none(), functionAddress, m_value->child(0), m_value->child(1)); |
| 115 | m_value->replaceWithIdentity(result); |
| 116 | m_changed = true; |
| 117 | } |
| 118 | |
| 119 | void processCurrentBlock() |
| 120 | { |
| 121 | for (m_index = 0; m_index < m_block->size(); ++m_index) { |
| 122 | m_value = m_block->at(m_index); |
| 123 | m_origin = m_value->origin(); |
| 124 | switch (m_value->opcode()) { |
| 125 | case Mod: { |
| 126 | if (m_value->isChill()) { |
| 127 | if (isARM64()) { |
| 128 | BasicBlock* before = m_blockInsertionSet.splitForward(m_block, m_index, &m_insertionSet); |
| 129 | BasicBlock* zeroDenCase = m_blockInsertionSet.insertBefore(m_block); |
| 130 | BasicBlock* normalModCase = m_blockInsertionSet.insertBefore(m_block); |
| 131 | |
| 132 | before->replaceLastWithNew<Value>(m_proc, Branch, m_origin, m_value->child(1)); |
| 133 | before->setSuccessors( |
| 134 | FrequentedBlock(normalModCase, FrequencyClass::Normal), |
| 135 | FrequentedBlock(zeroDenCase, FrequencyClass::Rare)); |
| 136 | |
| 137 | Value* divResult = normalModCase->appendNew<Value>(m_proc, chill(Div), m_origin, m_value->child(0), m_value->child(1)); |
| 138 | Value* multipliedBack = normalModCase->appendNew<Value>(m_proc, Mul, m_origin, divResult, m_value->child(1)); |
| 139 | Value* result = normalModCase->appendNew<Value>(m_proc, Sub, m_origin, m_value->child(0), multipliedBack); |
| 140 | UpsilonValue* normalResult = normalModCase->appendNew<UpsilonValue>(m_proc, m_origin, result); |
| 141 | normalModCase->appendNew<Value>(m_proc, Jump, m_origin); |
| 142 | normalModCase->setSuccessors(FrequentedBlock(m_block)); |
| 143 | |
| 144 | UpsilonValue* zeroResult = zeroDenCase->appendNew<UpsilonValue>( |
| 145 | m_proc, m_origin, |
| 146 | zeroDenCase->appendIntConstant(m_proc, m_value, 0)); |
| 147 | zeroDenCase->appendNew<Value>(m_proc, Jump, m_origin); |
| 148 | zeroDenCase->setSuccessors(FrequentedBlock(m_block)); |
| 149 | |
| 150 | Value* phi = m_insertionSet.insert<Value>(m_index, Phi, m_value->type(), m_origin); |
| 151 | normalResult->setPhi(phi); |
| 152 | zeroResult->setPhi(phi); |
| 153 | m_value->replaceWithIdentity(phi); |
| 154 | before->updatePredecessorsAfter(); |
| 155 | m_changed = true; |
| 156 | } else |
| 157 | makeDivisionChill(Mod); |
| 158 | break; |
| 159 | } |
| 160 | |
| 161 | if (m_value->type() == Double) { |
| 162 | Value* functionAddress = m_insertionSet.insert<ConstPtrValue>(m_index, m_origin, tagCFunction<OperationPtrTag>(Math::fmodDouble)); |
| 163 | Value* result = m_insertionSet.insert<CCallValue>(m_index, Double, m_origin, |
| 164 | Effects::none(), |
| 165 | functionAddress, |
| 166 | m_value->child(0), |
| 167 | m_value->child(1)); |
| 168 | m_value->replaceWithIdentity(result); |
| 169 | m_changed = true; |
| 170 | } else if (m_value->type() == Float) { |
| 171 | Value* numeratorAsDouble = m_insertionSet.insert<Value>(m_index, FloatToDouble, m_origin, m_value->child(0)); |
| 172 | Value* denominatorAsDouble = m_insertionSet.insert<Value>(m_index, FloatToDouble, m_origin, m_value->child(1)); |
| 173 | Value* functionAddress = m_insertionSet.insert<ConstPtrValue>(m_index, m_origin, tagCFunction<OperationPtrTag>(Math::fmodDouble)); |
| 174 | Value* doubleMod = m_insertionSet.insert<CCallValue>(m_index, Double, m_origin, |
| 175 | Effects::none(), |
| 176 | functionAddress, |
| 177 | numeratorAsDouble, |
| 178 | denominatorAsDouble); |
| 179 | Value* result = m_insertionSet.insert<Value>(m_index, DoubleToFloat, m_origin, doubleMod); |
| 180 | m_value->replaceWithIdentity(result); |
| 181 | m_changed = true; |
| 182 | } else if constexpr (isARM_THUMB2()) { |
| 183 | if (m_value->type() == Int64) |
| 184 | replaceWithBinaryCall(Math::i64_rem_s); |
| 185 | else |
| 186 | replaceWithBinaryCall(Math::i32_rem_s); |
| 187 | } else if (isARM64()) { |
| 188 | Value* divResult = m_insertionSet.insert<Value>(m_index, chill(Div), m_origin, m_value->child(0), m_value->child(1)); |
| 189 | Value* multipliedBack = m_insertionSet.insert<Value>(m_index, Mul, m_origin, divResult, m_value->child(1)); |
| 190 | Value* result = m_insertionSet.insert<Value>(m_index, Sub, m_origin, m_value->child(0), multipliedBack); |
| 191 | m_value->replaceWithIdentity(result); |
| 192 | m_changed = true; |
| 193 | } |
| 194 | break; |
| 195 | } |
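As a reading aid (illustrative code, not part of this file): a minimal scalar sketch of the semantics the chill-Mod blocks above construct, using the hypothetical name `chillMod`.

```cpp
#include <cstdint>
#include <limits>

// Sketch only: chill Mod never traps. A zero denominator yields 0
// (zeroDenCase above); otherwise the result is a - (a / b) * b
// (divResult, multipliedBack, Sub).
int64_t chillMod(int64_t a, int64_t b)
{
    if (!b)
        return 0;
    if (a == std::numeric_limits<int64_t>::min() && b == -1)
        return 0; // chill Div yields INT64_MIN here, and a - (INT64_MIN * -1) wraps to 0
    return a - (a / b) * b;
}
```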
| 196 | |
| 197 | case UMod: { |
| 198 | if constexpr (isARM_THUMB2()) { |
| 199 | if (m_value->child(0)->type() == Int64) |
| 200 | replaceWithBinaryCall(Math::i64_rem_u); |
| 201 | else |
| 202 | replaceWithBinaryCall(Math::i32_rem_u); |
| 203 | break; |
| 204 | } |
| 205 | if (isARM64()) { |
| 206 | Value* divResult = m_insertionSet.insert<Value>(m_index, UDiv, m_origin, m_value->child(0), m_value->child(1)); |
| 207 | Value* multipliedBack = m_insertionSet.insert<Value>(m_index, Mul, m_origin, divResult, m_value->child(1)); |
| 208 | Value* result = m_insertionSet.insert<Value>(m_index, Sub, m_origin, m_value->child(0), multipliedBack); |
| 209 | m_value->replaceWithIdentity(result); |
| 210 | m_changed = true; |
| 211 | } |
| 212 | break; |
| 213 | } |
| 214 | |
| 215 | case UDiv: { |
| 216 | if constexpr (!isARM_THUMB2()) |
| 217 | break; |
| 218 | if (m_value->type() == Int64) |
| 219 | replaceWithBinaryCall(Math::i64_div_u); |
| 220 | else |
| 221 | replaceWithBinaryCall(Math::i32_div_u); |
| 222 | break; |
| 223 | } |
| 224 | case FMax: |
| 225 | case FMin: { |
| 226 | if (isX86() || isARM_THUMB2()) { |
| 227 | bool isMax = m_value->opcode() == FMax; |
| 228 | |
| 229 | Value* a = m_value->child(0); |
| 230 | Value* b = m_value->child(1); |
| 231 | |
| 232 | Value* isEqualValue = m_insertionSet.insert<Value>( |
| 233 | m_index, Equal, m_origin, a, b); |
| 234 | |
| 235 | BasicBlock* before = m_blockInsertionSet.splitForward(m_block, m_index, &m_insertionSet); |
| 236 | |
| 237 | BasicBlock* isEqual = m_blockInsertionSet.insertBefore(m_block); |
| 238 | BasicBlock* notEqual = m_blockInsertionSet.insertBefore(m_block); |
| 239 | BasicBlock* isLessThan = m_blockInsertionSet.insertBefore(m_block); |
| 240 | BasicBlock* notLessThan = m_blockInsertionSet.insertBefore(m_block); |
| 241 | BasicBlock* isGreaterThan = m_blockInsertionSet.insertBefore(m_block); |
| 242 | BasicBlock* isNaN = m_blockInsertionSet.insertBefore(m_block); |
| 243 | |
| 244 | before->replaceLastWithNew<Value>(m_proc, Branch, m_origin, isEqualValue); |
| 245 | before->setSuccessors(FrequentedBlock(isEqual), FrequentedBlock(notEqual)); |
| 246 | |
| 247 | Value* lessThanValue = notEqual->appendNew<Value>(m_proc, LessThan, m_origin, a, b); |
| 248 | notEqual->appendNew<Value>(m_proc, Branch, m_origin, lessThanValue); |
| 249 | notEqual->setSuccessors(FrequentedBlock(isLessThan), FrequentedBlock(notLessThan)); |
| 250 | |
| 251 | Value* greaterThanValue = notLessThan->appendNew<Value>(m_proc, GreaterThan, m_origin, a, b); |
| 252 | notLessThan->appendNew<Value>(m_proc, Branch, m_origin, greaterThanValue); |
| 253 | notLessThan->setSuccessors(FrequentedBlock(isGreaterThan), FrequentedBlock(isNaN)); |
| 254 | |
| 255 | UpsilonValue* isLessThanResult = isLessThan->appendNew<UpsilonValue>( |
| 256 | m_proc, m_origin, isMax ? b : a); |
| 257 | isLessThan->appendNew<Value>(m_proc, Jump, m_origin); |
| 258 | isLessThan->setSuccessors(FrequentedBlock(m_block)); |
| 259 | |
| 260 | UpsilonValue* isGreaterThanResult = isGreaterThan->appendNew<UpsilonValue>( |
| 261 | m_proc, m_origin, isMax ? a : b); |
| 262 | isGreaterThan->appendNew<Value>(m_proc, Jump, m_origin); |
| 263 | isGreaterThan->setSuccessors(FrequentedBlock(m_block)); |
| 264 | |
| 265 | UpsilonValue* isEqualResult = isEqual->appendNew<UpsilonValue>( |
| 266 | m_proc, m_origin, isEqual->appendNew<Value>(m_proc, isMax ? BitAnd : BitOr, m_origin, a, b)); |
| 267 | isEqual->appendNew<Value>(m_proc, Jump, m_origin); |
| 268 | isEqual->setSuccessors(FrequentedBlock(m_block)); |
| 269 | |
| 270 | UpsilonValue* isNaNResult = isNaN->appendNew<UpsilonValue>( |
| 271 | m_proc, m_origin, isNaN->appendNew<Value>(m_proc, Add, m_origin, a, b)); |
| 272 | isNaN->appendNew<Value>(m_proc, Jump, m_origin); |
| 273 | isNaN->setSuccessors(FrequentedBlock(m_block)); |
| 274 | |
| 275 | Value* phi = m_insertionSet.insert<Value>( |
| 276 | m_index, Phi, m_value->type(), m_origin); |
| 277 | isLessThanResult->setPhi(phi); |
| 278 | isGreaterThanResult->setPhi(phi); |
| 279 | isEqualResult->setPhi(phi); |
| 280 | isNaNResult->setPhi(phi); |
| 281 | |
| 282 | m_value->replaceWithIdentity(phi); |
| 283 | before->updatePredecessorsAfter(); |
| 284 | m_changed = true; |
| 285 | } |
| 286 | break; |
| 287 | } |
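A hedged scalar model (not part of the file) of what the FMax branch structure above computes; `fmaxLikeB3` is an illustrative name.

```cpp
#include <bit>
#include <cstdint>

// Sketch only: equal operands are combined bitwise so that max(+0, -0) is +0
// (BitAnd) while min(+0, -0) would be -0 (BitOr), and the unordered case
// propagates NaN through a + b.
double fmaxLikeB3(double a, double b)
{
    if (a == b) // +0 and -0 compare equal; choose the sign bitwise
        return std::bit_cast<double>(std::bit_cast<uint64_t>(a) & std::bit_cast<uint64_t>(b));
    if (a < b)
        return b;
    if (a > b)
        return a;
    return a + b; // at least one NaN; NaN + x is NaN
}
```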
| 288 | |
| 289 | case Div: { |
| 290 | if (m_value->isChill()) |
| 291 | makeDivisionChill(Div); |
| 292 | else if (isARM_THUMB2() && (m_value->type() == Int64 || m_value->type() == Int32)) { |
| 293 | BasicBlock* before = m_blockInsertionSet.splitForward(m_block, m_index); |
| 294 | before->replaceLastWithNew<Value>(m_proc, Nop, m_origin); |
| 295 | Value* result = callDivModHelper(before, Div, m_value->child(0), m_value->child(1)); |
| 296 | before->appendNew<Value>(m_proc, Jump, m_origin); |
| 297 | before->setSuccessors(FrequentedBlock(m_block)); |
| 298 | m_value->replaceWithIdentity(result); |
| 299 | m_changed = true; |
| 300 | } |
| 301 | break; |
| 302 | } |
| 303 | |
| 304 | case Switch: { |
| 305 | SwitchValue* switchValue = m_value->as<SwitchValue>(); |
| 306 | Vector<SwitchCase> cases; |
| 307 | for (SwitchCase switchCase : switchValue->cases(m_block)) |
| 308 | cases.append(switchCase); |
| 309 | std::ranges::sort(cases, { }, &SwitchCase::caseValue); |
| 310 | FrequentedBlock fallThrough = m_block->fallThrough(); |
| 311 | m_block->values().removeLast(); |
| 312 | recursivelyBuildSwitch(cases, fallThrough, 0, false, cases.size(), m_block); |
| 313 | m_proc.deleteValue(switchValue); |
| 314 | m_block->updatePredecessorsAfter(); |
| 315 | m_changed = true; |
| 316 | break; |
| 317 | } |
| 318 | |
| 319 | case Depend: { |
| 320 | if (isX86()) { |
| 321 | // Create a load-load fence. This codegens to nothing on X86. We use it to tell the |
| 322 | // compiler not to block load motion. |
| 323 | FenceValue* fence = m_insertionSet.insert<FenceValue>(m_index, m_origin); |
| 324 | fence->read = HeapRange(); |
| 325 | fence->write = HeapRange::top(); |
| 326 | |
| 327 | // Kill the Depend, which should unlock a bunch of code simplification. |
| 328 | m_value->replaceWithBottom(m_insertionSet, m_index); |
| 329 | |
| 330 | m_changed = true; |
| 331 | } |
| 332 | break; |
| 333 | } |
| 334 | |
| 335 | case AtomicWeakCAS: |
| 336 | case AtomicStrongCAS: { |
| 337 | AtomicValue* atomic = m_value->as<AtomicValue>(); |
| 338 | Width width = atomic->accessWidth(); |
| 339 | |
| 340 | if (isCanonicalWidth(width)) |
| 341 | break; |
| 342 | |
| 343 | Value* expectedValue = atomic->child(0); |
| 344 | |
| 345 | if (!isX86()) { |
| 346 | // On ARM, the load part of the CAS does a load with zero extension. Therefore, we need |
| 347 | // to zero-extend the input. |
| 348 | Value* maskedExpectedValue = m_insertionSet.insert<Value>( |
| 349 | m_index, BitAnd, m_origin, expectedValue, |
| 350 | m_insertionSet.insertIntConstant(m_index, expectedValue, mask(width))); |
| 351 | |
| 352 | atomic->child(0) = maskedExpectedValue; |
| 353 | m_changed = true; |
| 354 | } |
| 355 | |
| 356 | if (atomic->opcode() == AtomicStrongCAS) { |
| 357 | Value* newValue = m_insertionSet.insert<Value>( |
| 358 | m_index, signExtendOpcode(width), m_origin, |
| 359 | m_insertionSet.insertClone(m_index, atomic)); |
| 360 | |
| 361 | atomic->replaceWithIdentity(newValue); |
| 362 | m_changed = true; |
| 363 | } |
| 364 | |
| 365 | break; |
| 366 | } |
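A small illustrative model (hypothetical helper names, shown for an 8-bit access width) of the two fix-ups above:

```cpp
#include <cstdint>

// Sketch only: the expected value is zero-extended to match the
// zero-extending load half of the CAS on ARM...
uint32_t maskExpected8(uint32_t expected)
{
    return expected & 0xffu; // BitAnd with mask(Width8)
}

// ...and a strong CAS result is sign-extended back to canonical Int32 form.
int32_t canonicalizeStrongCASResult8(uint32_t rawResult)
{
    return static_cast<int8_t>(rawResult & 0xffu); // signExtendOpcode(Width8)
}
```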
| 367 | |
| 368 | case AtomicXchgAdd: |
| 369 | case AtomicXchgAnd: |
| 370 | case AtomicXchgOr: |
| 371 | case AtomicXchgSub: |
| 372 | case AtomicXchgXor: |
| 373 | case AtomicXchg: { |
| 374 | // On X86, these may actually return garbage in the high bits. On ARM64, these sorta |
| 375 | // zero-extend their high bits, except that the high bits might get polluted by high |
| 376 | // bits in the operand. So, either way, we need to throw a sign-extend on these |
| 377 | // things. |
| 378 | |
| 379 | if (isX86()) { |
| 380 | if (m_value->opcode() == AtomicXchgSub && m_useCounts.numUses(m_value)) { |
| 381 | // On x86, xchgadd is better than xchgsub if it has any users. |
| 382 | m_value->setOpcodeUnsafely(AtomicXchgAdd); |
| 383 | m_value->child(0) = m_insertionSet.insert<Value>( |
| 384 | m_index, Neg, m_origin, m_value->child(0)); |
| 385 | } |
| 386 | |
| 387 | bool exempt = false; |
| 388 | switch (m_value->opcode()) { |
| 389 | case AtomicXchgAnd: |
| 390 | case AtomicXchgOr: |
| 391 | case AtomicXchgSub: |
| 392 | case AtomicXchgXor: |
| 393 | exempt = true; |
| 394 | break; |
| 395 | default: |
| 396 | break; |
| 397 | } |
| 398 | if (exempt) |
| 399 | break; |
| 400 | } |
| 401 | |
| 402 | if (isARM64_LSE()) { |
| 403 | if (m_value->opcode() == AtomicXchgSub) { |
| 404 | m_value->setOpcodeUnsafely(AtomicXchgAdd); |
| 405 | m_value->child(0) = m_insertionSet.insert<Value>( |
| 406 | m_index, Neg, m_origin, m_value->child(0)); |
| 407 | } |
| 408 | } |
| 409 | |
| 410 | AtomicValue* atomic = m_value->as<AtomicValue>(); |
| 411 | Width width = atomic->accessWidth(); |
| 412 | |
| 413 | if (isCanonicalWidth(width)) |
| 414 | break; |
| 415 | |
| 416 | Value* newValue = m_insertionSet.insert<Value>( |
| 417 | m_index, signExtendOpcode(width), m_origin, |
| 418 | m_insertionSet.insertClone(m_index, atomic)); |
| 419 | |
| 420 | atomic->replaceWithIdentity(newValue); |
| 421 | m_changed = true; |
| 422 | break; |
| 423 | } |
| 424 | |
| 425 | case Load8Z: |
| 426 | case Load16Z: { |
| 427 | if (isX86()) |
| 428 | break; |
| 429 | |
| 430 | MemoryValue* memory = m_value->as<MemoryValue>(); |
| 431 | if (!memory->hasFence()) |
| 432 | break; |
| 433 | |
| 434 | // Sub-width load-acq on ARM64 always sign extends. |
| 435 | Value* newLoad = m_insertionSet.insertClone(m_index, memory); |
| 436 | newLoad->setOpcodeUnsafely(memory->opcode() == Load8Z ? Load8S : Load16S); |
| 437 | |
| 438 | Value* newValue = m_insertionSet.insert<Value>( |
| 439 | m_index, BitAnd, m_origin, newLoad, |
| 440 | m_insertionSet.insertIntConstant( |
| 441 | m_index, m_origin, Int32, mask(memory->accessWidth()))); |
| 442 | |
| 443 | m_value->replaceWithIdentity(newValue); |
| 444 | m_changed = true; |
| 445 | break; |
| 446 | } |
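An illustrative scalar model (hypothetical name) of the rewrite above:

```cpp
#include <cstdint>

// Sketch only: ARM64's sub-width acquire load sign-extends, so the lowering
// performs the signed load (Load8S/Load16S) and masks the result to recover
// the zero extension that Load8Z/Load16Z promised.
int32_t load8ZAcquireModel(int8_t valueFromSignExtendingLoad)
{
    return static_cast<int32_t>(valueFromSignExtendingLoad) & 0xff; // BitAnd with mask(Width8)
}
```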
| 447 | |
| 448 | case VectorPopcnt: { |
| 449 | if (!isX86()) |
| 450 | break; |
| 451 | ASSERT(m_value->as<SIMDValue>()->simdLane() == SIMDLane::i8x16); |
| 452 | |
| 453 | // x86_64 does not natively support vector lanewise popcount, so we emulate it using multiple |
| 454 | // masks. |
| 455 | |
| 456 | v128_t bottomNibbleConst; |
| 457 | v128_t popcntConst; |
| 458 | bottomNibbleConst.u64x2[0] = 0x0f0f0f0f0f0f0f0f; |
| 459 | bottomNibbleConst.u64x2[1] = 0x0f0f0f0f0f0f0f0f; |
| 460 | popcntConst.u64x2[0] = 0x0302020102010100; |
| 461 | popcntConst.u64x2[1] = 0x0403030203020201; |
| 462 | Value* bottomNibbleMask = m_insertionSet.insert<Const128Value>(m_index, m_origin, bottomNibbleConst); |
| 463 | Value* popcntMask = m_insertionSet.insert<Const128Value>(m_index, m_origin, popcntConst); |
| 464 | |
| 465 | Value* four = m_insertionSet.insert<Const32Value>(m_index, m_origin, 4); |
| 466 | Value* v = m_value->child(0); |
| 467 | Value* upper = m_insertionSet.insert<SIMDValue>(m_index, m_origin, VectorAndnot, B3::V128, SIMDLane::v128, SIMDSignMode::None, v, bottomNibbleMask); |
| 468 | Value* lower = m_insertionSet.insert<SIMDValue>(m_index, m_origin, VectorAnd, B3::V128, SIMDLane::v128, SIMDSignMode::None, v, bottomNibbleMask); |
| 469 | upper = m_insertionSet.insert<SIMDValue>(m_index, m_origin, VectorShr, B3::V128, SIMDLane::i16x8, SIMDSignMode::Unsigned, upper, four); |
| 470 | lower = m_insertionSet.insert<SIMDValue>(m_index, m_origin, VectorSwizzle, B3::V128, SIMDLane::i8x16, SIMDSignMode::None, popcntMask, lower); |
| 471 | upper = m_insertionSet.insert<SIMDValue>(m_index, m_origin, VectorSwizzle, B3::V128, SIMDLane::i8x16, SIMDSignMode::None, popcntMask, upper); |
| 472 | Value* result = m_insertionSet.insert<SIMDValue>(m_index, m_origin, VectorAdd, B3::V128, SIMDLane::i8x16, SIMDSignMode::None, upper, lower); |
| 473 | m_value->replaceWithIdentity(result); |
| 474 | m_changed = true; |
| 475 | break; |
| 476 | } |
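A hedged scalar model of the nibble-table trick above; the table bytes are exactly the bytes of popcntConst.

```cpp
#include <cstdint>

// Sketch only: split each byte into nibbles, look each nibble up in a
// 16-entry popcount table (the role VectorSwizzle plays with popcntConst),
// and add the two lookups.
uint8_t popcount8ViaNibbleLUT(uint8_t byte)
{
    static constexpr uint8_t lut[16] = {
        0, 1, 1, 2, 1, 2, 2, 3, // bytes of popcntConst.u64x2[0]
        1, 2, 2, 3, 2, 3, 3, 4  // bytes of popcntConst.u64x2[1]
    };
    uint8_t lower = byte & 0x0f;        // VectorAnd with bottomNibbleMask
    uint8_t upper = (byte & 0xf0) >> 4; // VectorAndnot, then VectorShr by 4
    return lut[lower] + lut[upper];     // two VectorSwizzles, then VectorAdd
}
```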
| 477 | |
| 478 | case VectorNot: { |
| 479 | if (!isX86()) |
| 480 | break; |
| 481 | // x86_64 has no vector bitwise NOT instruction, so we expand vxv.not v into vxv.xor -1, v |
| 482 | // here to give B3/Air a chance to optimize out repeated usage of the mask. |
| 483 | v128_t mask; |
| 484 | mask.u64x2[0] = 0xffffffffffffffff; |
| 485 | mask.u64x2[1] = 0xffffffffffffffff; |
| 486 | Value* ones = m_insertionSet.insert<Const128Value>(m_index, m_origin, mask); |
| 487 | Value* result = m_insertionSet.insert<SIMDValue>(m_index, m_origin, VectorXor, B3::V128, SIMDLane::v128, SIMDSignMode::None, ones, m_value->child(0)); |
| 488 | m_value->replaceWithIdentity(result); |
| 489 | m_changed = true; |
| 490 | break; |
| 491 | } |
| 492 | |
| 493 | case VectorNeg: { |
| 494 | if (!isX86()) |
| 495 | break; |
| 496 | // x86_64 has no vector negate instruction. For integer vectors, we can replicate negation by |
| 497 | // subtracting from zero. For floating-point vectors, we need to toggle the sign using packed |
| 498 | // XOR. |
| 499 | SIMDValue* value = m_value->as<SIMDValue>(); |
| 500 | switch (value->simdLane()) { |
| 501 | case SIMDLane::i8x16: |
| 502 | case SIMDLane::i16x8: |
| 503 | case SIMDLane::i32x4: |
| 504 | case SIMDLane::i64x2: { |
| 505 | Value* zero = m_insertionSet.insert<Const128Value>(m_index, m_origin, v128_t()); |
| 506 | Value* result = m_insertionSet.insert<SIMDValue>(m_index, m_origin, VectorSub, B3::V128, value->simdInfo(), zero, m_value->child(0)); |
| 507 | m_value->replaceWithIdentity(result); |
| 508 | m_changed = true; |
| 509 | break; |
| 510 | } |
| 511 | case SIMDLane::f32x4: { |
| 512 | Value* topBit = m_insertionSet.insert<Const32Value>(m_index, m_origin, 0x80000000u); |
| 513 | Value* floatMask = m_insertionSet.insert<Value>(m_index, BitwiseCast, m_origin, topBit); |
| 514 | Value* vectorMask = m_insertionSet.insert<SIMDValue>(m_index, m_origin, VectorSplat, B3::V128, SIMDLane::f32x4, SIMDSignMode::None, floatMask); |
| 515 | Value* result = m_insertionSet.insert<SIMDValue>(m_index, m_origin, VectorXor, B3::V128, SIMDLane::v128, SIMDSignMode::None, m_value->child(0), vectorMask); |
| 516 | m_value->replaceWithIdentity(result); |
| 517 | m_changed = true; |
| 518 | break; |
| 519 | } |
| 520 | case SIMDLane::f64x2: { |
| 521 | Value* topBit = m_insertionSet.insert<Const64Value>(m_index, m_origin, 0x8000000000000000ull); |
| 522 | Value* doubleMask = m_insertionSet.insert<Value>(m_index, BitwiseCast, m_origin, topBit); |
| 523 | Value* vectorMask = m_insertionSet.insert<SIMDValue>(m_index, m_origin, VectorSplat, B3::V128, SIMDLane::f64x2, SIMDSignMode::None, doubleMask); |
| 524 | Value* result = m_insertionSet.insert<SIMDValue>(m_index, m_origin, VectorXor, B3::V128, SIMDLane::v128, SIMDSignMode::None, m_value->child(0), vectorMask); |
| 525 | m_value->replaceWithIdentity(result); |
| 526 | m_changed = true; |
| 527 | break; |
| 528 | } |
| 529 | default: |
| 530 | RELEASE_ASSERT_NOT_REACHED(); |
| 531 | } |
| 532 | break; |
| 533 | } |
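An illustrative scalar analogue (hypothetical names) of the two strategies above:

```cpp
#include <bit>
#include <cstdint>

// Sketch only: float negation is a bare XOR against the sign bit, so no
// arithmetic (and no NaN quieting) happens.
float negateBySignBit(float x)
{
    return std::bit_cast<float>(std::bit_cast<uint32_t>(x) ^ 0x80000000u);
}

// Integer lanes negate by subtracting from zero, wrapping the same way
// VectorSub does (so the negation of INT32_MIN stays INT32_MIN).
int32_t negateBySub(int32_t x)
{
    return static_cast<int32_t>(0u - static_cast<uint32_t>(x));
}
```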
| 534 | |
| 535 | case VectorNotEqual: |
| 536 | if (isX86()) |
| 537 | invertedComparisonByXor(VectorEqual, m_value->child(0), m_value->child(1)); |
| 538 | break; |
| 539 | case VectorAbove: |
| 540 | if (isX86()) |
| 541 | invertedComparisonByXor(VectorBelowOrEqual, m_value->child(0), m_value->child(1)); |
| 542 | break; |
| 543 | case VectorBelow: |
| 544 | if (isX86()) |
| 545 | invertedComparisonByXor(VectorAboveOrEqual, m_value->child(0), m_value->child(1)); |
| 546 | break; |
| 547 | case VectorGreaterThanOrEqual: |
| 548 | if (isX86() && m_value->as<SIMDValue>()->simdLane() == SIMDLane::i64x2) { |
| 549 | // Note: rhs and lhs are reversed here, we are semantically negating LessThan. GreaterThan is |
| 550 | // just better supported on AVX. |
| 551 | invertedComparisonByXor(VectorGreaterThan, m_value->child(1), m_value->child(0)); |
| 552 | } |
| 553 | break; |
| 554 | case VectorLessThanOrEqual: |
| 555 | if (isX86() && m_value->as<SIMDValue>()->simdLane() == SIMDLane::i64x2) |
| 556 | invertedComparisonByXor(VectorGreaterThan, m_value->child(0), m_value->child(1)); |
| 557 | break; |
| 558 | case VectorShr: |
| 559 | case VectorShl: { |
| 560 | if constexpr (!isARM64()) |
| 561 | break; |
| 562 | SIMDValue* value = m_value->as<SIMDValue>(); |
| 563 | SIMDLane lane = value->simdLane(); |
| 564 | |
| 565 | int32_t mask = (elementByteSize(lane) * CHAR_BIT) - 1; |
| 566 | Value* shiftAmount = m_insertionSet.insert<Value>(m_index, BitAnd, m_origin, value->child(1), m_insertionSet.insertIntConstant(m_index, m_origin, Int32, mask)); |
| 567 | if (value->opcode() == VectorShr) { |
| 568 | // ARM64 doesn't have a version of this instruction for right shift. Instead, if the input to |
| 569 | // left shift is negative, it's a right shift by the absolute value of that amount. |
| 570 | shiftAmount = m_insertionSet.insert<Value>(m_index, Neg, m_origin, shiftAmount); |
| 571 | } |
| 572 | Value* shiftVector = m_insertionSet.insert<SIMDValue>(m_index, m_origin, VectorSplat, B3::V128, SIMDLane::i8x16, SIMDSignMode::None, shiftAmount); |
| 573 | Value* result = m_insertionSet.insert<SIMDValue>(m_index, m_origin, VectorShiftByVector, B3::V128, value->simdInfo(), value->child(0), shiftVector); |
| 574 | m_value->replaceWithIdentity(result); |
| 575 | m_changed = true; |
| 576 | break; |
| 577 | } |
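An illustrative per-lane model (hypothetical name, i8x16 lanes) of the shift lowering above:

```cpp
#include <cstdint>

// Sketch only: the amount is masked to the lane width and, for VectorShr,
// negated, because ARM64's shift-by-vector instructions shift right when the
// per-lane amount is negative.
int8_t shiftI8LaneModel(int8_t lane, int32_t amount, bool isRightShift)
{
    int32_t masked = amount & 7; // (elementByteSize(i8x16) * CHAR_BIT) - 1
    if (isRightShift)
        masked = -masked;        // the Neg inserted above for VectorShr
    if (masked >= 0)
        return static_cast<int8_t>(lane << masked);
    return static_cast<int8_t>(lane >> -masked); // arithmetic shift for signed lanes
}
```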
| 578 | |
| 579 | case WasmStructGet: { |
| 580 | WasmStructGetValue* structGet = m_value->as<WasmStructGetValue>(); |
| 581 | Value* structPtr = structGet->child(0); |
| 582 | SUPPRESS_UNCOUNTED_LOCAL const Wasm::StructType* structType = structGet->structType(); |
| 583 | Wasm::StructFieldCount fieldIndex = structGet->fieldIndex(); |
| 584 | auto fieldType = structType->field(fieldIndex).type; |
| 585 | bool canTrap = structGet->kind().traps(); |
| 586 | HeapRange range = structGet->range(); |
| 587 | Mutability mutability = structGet->mutability(); |
| 588 | |
| 589 | int32_t fieldOffset = JSWebAssemblyStruct::offsetOfData() + structType->offsetOfFieldInPayload(fieldIndex); |
| 590 | |
| 591 | auto wrapTrapping = [&](auto input) -> B3::Kind { |
| 592 | if (canTrap) |
| 593 | return trapping(input); |
| 594 | return input; |
| 595 | }; |
| 596 | |
| 597 | Value* result; |
| 598 | if (fieldType.is<Wasm::PackedType>()) { |
| 599 | switch (fieldType.as<Wasm::PackedType>()) { |
| 600 | case Wasm::PackedType::I8: |
| 601 | result = m_insertionSet.insert<MemoryValue>(m_index, wrapTrapping(Load8Z), Int32, m_origin, structPtr, fieldOffset, range); |
| 602 | break; |
| 603 | case Wasm::PackedType::I16: |
| 604 | result = m_insertionSet.insert<MemoryValue>(m_index, wrapTrapping(Load16Z), Int32, m_origin, structPtr, fieldOffset, range); |
| 605 | break; |
| 606 | } |
| 607 | } else { |
| 608 | ASSERT(fieldType.is<Wasm::Type>()); |
| 609 | auto unpacked = fieldType.unpacked(); |
| 610 | Type b3Type; |
| 611 | switch (unpacked.kind) { |
| 612 | case Wasm::TypeKind::I32: |
| 613 | b3Type = Int32; |
| 614 | break; |
| 615 | case Wasm::TypeKind::I64: |
| 616 | b3Type = Int64; |
| 617 | break; |
| 618 | case Wasm::TypeKind::F32: |
| 619 | b3Type = Float; |
| 620 | break; |
| 621 | case Wasm::TypeKind::F64: |
| 622 | b3Type = Double; |
| 623 | break; |
| 624 | case Wasm::TypeKind::V128: |
| 625 | b3Type = V128; |
| 626 | break; |
| 627 | default: |
| 628 | // Reference types are stored as Int64 (pointer-sized) |
| 629 | b3Type = Int64; |
| 630 | break; |
| 631 | } |
| 632 | result = m_insertionSet.insert<MemoryValue>(m_index, wrapTrapping(Load), b3Type, m_origin, structPtr, fieldOffset, range); |
| 633 | } |
| 634 | |
| 635 | result->as<MemoryValue>()->setReadsMutability(mutability); |
| 636 | m_value->replaceWithIdentity(result); |
| 637 | m_changed = true; |
| 638 | break; |
| 639 | } |
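A minimal sketch (stand-in offsets, not JSWebAssemblyStruct's real layout) of the address computation a struct.get lowers to:

```cpp
#include <cstdint>
#include <cstring>

// Sketch only: one load at structPtr + offsetOfData() +
// offsetOfFieldInPayload(fieldIndex); packed i8/i16 fields would use
// zero-extending loads instead.
uint64_t loadI64Field(const uint8_t* structPtr, int32_t payloadOffset, int32_t fieldOffsetInPayload)
{
    uint64_t value;
    std::memcpy(&value, structPtr + payloadOffset + fieldOffsetInPayload, sizeof(value));
    return value;
}
```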
| 640 | |
| 641 | case WasmStructSet: { |
| 642 | WasmStructSetValue* structSet = m_value->as<WasmStructSetValue>(); |
| 643 | Value* structPtr = structSet->child(0); |
| 644 | Value* value = structSet->child(1); |
| 645 | SUPPRESS_UNCOUNTED_LOCAL const Wasm::StructType* structType = structSet->structType(); |
| 646 | Wasm::StructFieldCount fieldIndex = structSet->fieldIndex(); |
| 647 | auto fieldType = structType->field(fieldIndex).type; |
| 648 | bool canTrap = structSet->kind().traps(); |
| 649 | HeapRange range = structSet->range(); |
| 650 | |
| 651 | int32_t fieldOffset = JSWebAssemblyStruct::offsetOfData() + structType->offsetOfFieldInPayload(fieldIndex); |
| 652 | |
| 653 | auto wrapTrapping = [&](auto input) -> B3::Kind { |
| 654 | if (canTrap) |
| 655 | return trapping(input); |
| 656 | return input; |
| 657 | }; |
| 658 | |
| 659 | if (fieldType.is<Wasm::PackedType>()) { |
| 660 | switch (fieldType.as<Wasm::PackedType>()) { |
| 661 | case Wasm::PackedType::I8: |
| 662 | m_insertionSet.insert<MemoryValue>(m_index, wrapTrapping(Store8), m_origin, value, structPtr, fieldOffset, range); |
| 663 | break; |
| 664 | case Wasm::PackedType::I16: |
| 665 | m_insertionSet.insert<MemoryValue>(m_index, wrapTrapping(Store16), m_origin, value, structPtr, fieldOffset, range); |
| 666 | break; |
| 667 | } |
| 668 | } else |
| 669 | m_insertionSet.insert<MemoryValue>(m_index, wrapTrapping(Store), m_origin, value, structPtr, fieldOffset, range); |
| 670 | |
| 671 | m_value->replaceWithNop(); |
| 672 | m_changed = true; |
| 673 | break; |
| 674 | } |
| 675 | |
| 676 | case WasmStructNew: { |
| 677 | WasmStructNewValue* structNew = m_value->as<WasmStructNewValue>(); |
| 678 | Value* instance = structNew->instance(); |
| 679 | Value* structureID = structNew->structureID(); |
| 680 | SUPPRESS_UNCOUNTED_LOCAL const Wasm::StructType* structType = structNew->structType(); |
| 681 | uint32_t typeIndex = structNew->typeIndex(); |
| 682 | int32_t allocatorsBaseOffset = structNew->allocatorsBaseOffset(); |
| 683 | |
| 684 | size_t allocationSize = JSWebAssemblyStruct::allocationSize(structType->instancePayloadSize()); |
| 685 | |
| 686 | static_assert(!(MarkedSpace::sizeStep & (MarkedSpace::sizeStep - 1)), "MarkedSpace::sizeStep must be a power of two."); |
| 687 | unsigned stepShift = getLSBSet(MarkedSpace::sizeStep); |
| 688 | size_t sizeClass = (allocationSize + MarkedSpace::sizeStep - 1) >> stepShift; |
| 689 | bool useFastPath = (sizeClass <= (MarkedSpace::largeCutoff >> stepShift)); |
| 690 | |
| 691 | BasicBlock* before = m_blockInsertionSet.splitForward(m_block, m_index, &m_insertionSet); |
| 692 | BasicBlock* slowPath = m_blockInsertionSet.insertBefore(m_block); |
| 693 | |
| 694 | UpsilonValue* fastUpsilon = nullptr; |
| 695 | if (useFastPath) { |
| 696 | BasicBlock* fastPath = m_blockInsertionSet.insertBefore(m_block); |
| 697 | BasicBlock* fastPathContinuation = m_blockInsertionSet.insertBefore(m_block); |
| 698 | |
| 699 | // Replace the Jump added by splitForward with Nop so we can add our own control flow |
| 700 | before->replaceLastWithNew<Value>(m_proc, Nop, m_origin); |
| 701 | |
| 702 | int32_t allocatorOffset = allocatorsBaseOffset + static_cast<int32_t>(sizeClass * sizeof(Allocator)); |
| 703 | Value* allocator = before->appendNew<MemoryValue>(m_proc, Load, pointerType(), m_origin, instance, allocatorOffset); |
| 704 | |
| 705 | Value* allocatorIsNull = before->appendNew<Value>(m_proc, Equal, m_origin, allocator, before->appendIntConstant(m_proc, m_origin, pointerType(), 0)); |
| 706 | before->appendNew<Value>(m_proc, Branch, m_origin, allocatorIsNull); |
| 707 | before->setSuccessors(FrequentedBlock(slowPath, FrequencyClass::Rare), FrequentedBlock(fastPath)); |
| 708 | |
| 709 | PatchpointValue* patchpoint = fastPath->appendNew<PatchpointValue>(m_proc, pointerType(), m_origin); |
| 710 | if (isARM64()) { |
| 711 | // emitAllocateWithNonNullAllocator uses the scratch registers on ARM. |
| 712 | patchpoint->clobber(RegisterSetBuilder::macroClobberedGPRs()); |
| 713 | } |
| 714 | patchpoint->effects.terminal = true; |
| 715 | patchpoint->appendSomeRegisterWithClobber(allocator); |
| 716 | patchpoint->numGPScratchRegisters++; |
| 717 | patchpoint->resultConstraints = { ValueRep::SomeEarlyRegister }; |
| 718 | |
| 719 | patchpoint->setGenerator([=](CCallHelpers& jit, const StackmapGenerationParams& params) { |
| 720 | AllowMacroScratchRegisterUsage allowScratch(jit); |
| 721 | CCallHelpers::JumpList jumpToSlowPath; |
| 722 | |
| 723 | GPRReg allocatorGPR = params[1].gpr(); |
| 724 | |
| 725 | // We use a patchpoint to emit the allocation path because whenever we mess with |
| 726 | // allocation paths, we already reason about them at the machine code level. We know |
| 727 | // exactly what instruction sequence we want. We're confident that no compiler |
| 728 | // optimization could make this code better. So, it's best to have the code in |
| 729 | // AssemblyHelpers::emitAllocate(). That way, the same optimized path is shared by |
| 730 | // all of the compiler tiers. |
| 731 | jit.emitAllocateWithNonNullAllocator( |
| 732 | params[0].gpr(), JITAllocator::variableNonNull(), allocatorGPR, params.gpScratch(0), |
| 733 | jumpToSlowPath, CCallHelpers::SlowAllocationResult::UndefinedBehavior); |
| 734 | |
| 735 | CCallHelpers::Jump jumpToSuccess; |
| 736 | if (!params.fallsThroughToSuccessor(0)) |
| 737 | jumpToSuccess = jit.jump(); |
| 738 | |
| 739 | Vector<Box<CCallHelpers::Label>> labels = params.successorLabels(); |
| 740 | |
| 741 | params.addLatePath([=](CCallHelpers& jit) { |
| 742 | jumpToSlowPath.linkTo(*labels[1], &jit); |
| 743 | if (jumpToSuccess.isSet()) |
| 744 | jumpToSuccess.linkTo(*labels[0], &jit); |
| 745 | }); |
| 746 | }); |
| 747 | |
| 748 | fastPath->appendSuccessor({ fastPathContinuation, FrequencyClass::Normal }); |
| 749 | fastPath->appendSuccessor({ slowPath, FrequencyClass::Rare }); |
| 750 | |
| 751 | // Header initialization happens in fastPathContinuation, not in fastPath |
| 752 | Value* cell = patchpoint; |
| 753 | Value* typeInfo = fastPathContinuation->appendNew<Const32Value>(m_proc, m_origin, JSWebAssemblyStruct::typeInfoBlob().blob()); |
| 754 | fastPathContinuation->appendNew<MemoryValue>(m_proc, Store, m_origin, structureID, cell, static_cast<int32_t>(JSCell::structureIDOffset())); |
| 755 | fastPathContinuation->appendNew<MemoryValue>(m_proc, Store, m_origin, typeInfo, cell, static_cast<int32_t>(JSCell::indexingTypeAndMiscOffset())); |
| 756 | fastPathContinuation->appendNew<MemoryValue>(m_proc, Store, m_origin, fastPathContinuation->appendIntConstant(m_proc, m_origin, pointerType(), 0), cell, static_cast<int32_t>(JSObject::butterflyOffset())); |
| 757 | |
| 758 | fastUpsilon = fastPathContinuation->appendNew<UpsilonValue>(m_proc, m_origin, cell); |
| 759 | fastPathContinuation->appendNew<Value>(m_proc, Jump, m_origin); |
| 760 | fastPathContinuation->setSuccessors(m_block); |
| 761 | } else { |
| 762 | // Just redirect the Jump added by splitForward to slowPath |
| 763 | before->setSuccessors(slowPath); |
| 764 | } |
| 765 | |
| 766 | Value* slowFunctionAddress = slowPath->appendNew<ConstPtrValue>(m_proc, m_origin, tagCFunction<OperationPtrTag>(Wasm::operationWasmStructNewEmpty)); |
| 767 | Value* typeIndexValue = slowPath->appendNew<Const32Value>(m_proc, m_origin, typeIndex); |
| 768 | Value* slowResult = slowPath->appendNew<CCallValue>(m_proc, Int64, m_origin, Effects::forCall(), slowFunctionAddress, instance, typeIndexValue); |
| 769 | |
| 770 | // Null check for slow path result |
| 771 | Value* isNull = slowPath->appendNew<Value>(m_proc, Equal, m_origin, slowResult, slowPath->appendNew<Const64Value>(m_proc, m_origin, JSValue::encode(jsNull()))); |
| 772 | CheckValue* check = slowPath->appendNew<CheckValue>(m_proc, Check, m_origin, isNull); |
| 773 | check->setGenerator([=](CCallHelpers& jit, const StackmapGenerationParams&) { |
| 774 | jit.move(CCallHelpers::TrustedImm32(static_cast<uint32_t>(Wasm::ExceptionType::BadStructNew)), GPRInfo::argumentGPR1); |
| 775 | jit.nearCallThunk(CodeLocationLabel<JITThunkPtrTag>(Wasm::Thunks::singleton().stub(Wasm::throwExceptionFromOMGThunkGenerator).code())); |
| 776 | }); |
| 777 | |
| 778 | UpsilonValue* slowUpsilon = slowPath->appendNew<UpsilonValue>(m_proc, m_origin, slowResult); |
| 779 | slowPath->appendNew<Value>(m_proc, Jump, m_origin); |
| 780 | slowPath->setSuccessors(m_block); |
| 781 | |
| 782 | Value* phi = m_insertionSet.insert<Value>(m_index, Phi, pointerType(), m_origin); |
| 783 | if (fastUpsilon) |
| 784 | fastUpsilon->setPhi(phi); |
| 785 | slowUpsilon->setPhi(phi); |
| 786 | |
| 787 | m_insertionSet.insert<MemoryValue>(m_index, Store, m_origin, m_insertionSet.insert<Const32Value>(m_index, m_origin, structType->instancePayloadSize()), phi, static_cast<int32_t>(JSWebAssemblyStruct::offsetOfSize())); |
| 788 | |
| 789 | m_value->replaceWithIdentity(phi); |
| 790 | before->updatePredecessorsAfter(); |
| 791 | m_changed = true; |
| 792 | break; |
| 793 | } |
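A self-contained model of the size-class arithmetic above, with an assumed step value (MarkedSpace's real constants live in MarkedSpace.h):

```cpp
#include <cstddef>

constexpr size_t kSizeStep = 16;  // assumed power of two, standing in for MarkedSpace::sizeStep
constexpr size_t kStepShift = 4;  // getLSBSet(kSizeStep)

// Round the allocation size up to the next multiple of the step, then divide
// by the step; the shift is valid because the step is a power of two.
constexpr size_t sizeClassFor(size_t allocationSize)
{
    return (allocationSize + kSizeStep - 1) >> kStepShift;
}

static_assert(sizeClassFor(1) == 1);
static_assert(sizeClassFor(16) == 1);
static_assert(sizeClassFor(17) == 2);
```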
| 794 | |
| 795 | case WasmRefCast: |
| 796 | case WasmRefTest: { |
| 797 | WasmRefTypeCheckValue* typeCheck = m_value->as<WasmRefTypeCheckValue>(); |
| 798 | Value* reference = typeCheck->child(0); |
| 799 | int32_t targetHeapType = typeCheck->targetHeapType(); |
| 800 | bool allowNull = typeCheck->allowNull(); |
| 801 | bool referenceIsNullable = typeCheck->referenceIsNullable(); |
| 802 | bool definitelyIsCellOrNull = typeCheck->definitelyIsCellOrNull(); |
| 803 | bool definitelyIsWasmGCObjectOrNull = typeCheck->definitelyIsWasmGCObjectOrNull(); |
| 804 | const Wasm::RTT* targetRTT = typeCheck->targetRTT(); |
Local variable 'targetRTT' is uncounted and unsafe
| 805 | bool targetIsFinal = typeCheck->targetIsFinal(); |
| 806 | bool targetIsFunction = typeCheck->targetIsFunction(); |
| 807 | bool isCast = typeCheck->kind().opcode() == WasmRefCast; |
| 808 | bool shouldNegate = !isCast && typeCheck->as<WasmRefTestValue>()->shouldNegate(); |
| 809 | |
| 810 | BasicBlock* before = m_blockInsertionSet.splitForward(m_block, m_index, &m_insertionSet); |
| 811 | BasicBlock* continuation = m_block; |
| 812 | |
| 813 | BasicBlock* trueBlock = nullptr; |
| 814 | BasicBlock* falseBlock = nullptr; |
| 815 | if (!isCast) { |
| 816 | trueBlock = m_blockInsertionSet.insertBefore(m_block); |
| 817 | falseBlock = m_blockInsertionSet.insertBefore(m_block); |
| 818 | } |
| 819 | |
| 820 | auto castFailure = [=](CCallHelpers& jit, const StackmapGenerationParams&) { |
| 821 | jit.move(CCallHelpers::TrustedImm32(static_cast<uint32_t>(Wasm::ExceptionType::CastFailure)), GPRInfo::argumentGPR1); |
| 822 | jit.nearCallThunk(CodeLocationLabel<JITThunkPtrTag>(Wasm::Thunks::singleton().stub(Wasm::throwExceptionFromOMGThunkGenerator).code())); |
| 823 | }; |
| 824 | |
| 825 | auto emitCheckOrBranch = [&](Value* condition, BasicBlock* currentBlock) { |
| 826 | if (isCast) { |
| 827 | CheckValue* check = currentBlock->appendNew<CheckValue>(m_proc, Check, m_origin, condition); |
| 828 | check->setGenerator(castFailure); |
| 829 | } else { |
| 830 | BasicBlock* successBlock = m_blockInsertionSet.insertBefore(m_block); |
| 831 | currentBlock->appendNew<Value>(m_proc, Branch, m_origin, condition); |
| 832 | currentBlock->setSuccessors(FrequentedBlock(falseBlock), FrequentedBlock(successBlock)); |
| 833 | return successBlock; |
| 834 | } |
| 835 | return currentBlock; |
| 836 | }; |
| 837 | |
| 838 | BasicBlock* currentBlock = before; |
| 839 | before->replaceLastWithNew<Value>(m_proc, Nop, m_origin); |
| 840 | |
| 841 | // Determine if we can use trapping loads to fuse the null check |
| 842 | // This optimization only applies for Cast mode, !allowNull, user-defined types |
| 843 | auto castAccessOffset = [&]() -> std::optional<ptrdiff_t> { |
| 844 | if (!isCast) |
| 845 | return std::nullopt; |
| 846 | if (allowNull) |
| 847 | return std::nullopt; |
| 848 | if (Wasm::typeIndexIsType(static_cast<Wasm::TypeIndex>(targetHeapType))) |
| 849 | return std::nullopt; |
| 850 | |
| 851 | if (targetIsFunction) |
| 852 | return WebAssemblyFunctionBase::offsetOfRTT(); |
| 853 | |
| 854 | if (!definitelyIsCellOrNull) |
| 855 | return std::nullopt; |
| 856 | if (!definitelyIsWasmGCObjectOrNull) |
| 857 | return JSCell::typeInfoTypeOffset(); |
| 858 | return JSCell::structureIDOffset(); |
| 859 | }; |
| 860 | |
| 861 | bool canTrap = false; |
| 862 | auto wrapTrapping = [&](B3::Kind input) -> B3::Kind { |
| 863 | if (canTrap) { |
| 864 | canTrap = false; |
| 865 | return trapping(input); |
| 866 | } |
| 867 | return input; |
| 868 | }; |
| 869 | |
| 870 | if (referenceIsNullable) { |
| 871 | if (auto offset = castAccessOffset(); offset && offset.value() <= static_cast<ptrdiff_t>(Wasm::maxAcceptableOffsetForNullReference())) |
| 872 | canTrap = true; |
| 873 | else { |
| 874 | BasicBlock* nullCase = m_blockInsertionSet.insertBefore(m_block); |
| 875 | BasicBlock* nonNullCase = m_blockInsertionSet.insertBefore(m_block); |
| 876 | |
| 877 | Value* isNull = currentBlock->appendNew<Value>(m_proc, Equal, m_origin, reference, currentBlock->appendNew<Const64Value>(m_proc, m_origin, JSValue::encode(jsNull()))); |
| 878 | currentBlock->appendNew<Value>(m_proc, Branch, m_origin, isNull); |
| 879 | currentBlock->setSuccessors(FrequentedBlock(nullCase), FrequentedBlock(nonNullCase)); |
| 880 | |
| 881 | // Handle null case |
| 882 | if (isCast) { |
| 883 | if (!allowNull) { |
| 884 | PatchpointValue* throwException = nullCase->appendNew<PatchpointValue>(m_proc, Void, m_origin); |
| 885 | throwException->setGenerator(castFailure); |
| 886 | } |
| 887 | nullCase->appendNew<Value>(m_proc, Jump, m_origin); |
| 888 | nullCase->setSuccessors(FrequentedBlock(continuation)); |
| 889 | } else { |
| 890 | // Test: null -> allowNull ? true : false |
| 891 | BasicBlock* nextBlock = allowNull ? trueBlock : falseBlock; |
| 892 | nullCase->appendNew<Value>(m_proc, Jump, m_origin); |
| 893 | nullCase->setSuccessors(FrequentedBlock(nextBlock)); |
| 894 | } |
| 895 | |
| 896 | currentBlock = nonNullCase; |
| 897 | } |
| 898 | } |
| 899 | |
| 900 | if (Wasm::typeIndexIsType(static_cast<Wasm::TypeIndex>(targetHeapType))) { |
| 901 | switch (static_cast<Wasm::TypeKind>(targetHeapType)) { |
| 902 | case Wasm::TypeKind::Funcref: |
| 903 | case Wasm::TypeKind::Externref: |
| 904 | case Wasm::TypeKind::Anyref: |
| 905 | case Wasm::TypeKind::Exnref: |
| 906 | // Top types - always pass |
| 907 | break; |
| 908 | case Wasm::TypeKind::Noneref: |
| 909 | case Wasm::TypeKind::Nofuncref: |
| 910 | case Wasm::TypeKind::Noexternref: |
| 911 | case Wasm::TypeKind::Noexnref: |
| 912 | // Bottom types - always fail |
| 913 | if (isCast) { |
| 914 | PatchpointValue* throwException = currentBlock->appendNew<PatchpointValue>(m_proc, Void, m_origin); |
| 915 | throwException->setGenerator(castFailure); |
| 916 | } else { |
| 917 | currentBlock->appendNew<Value>(m_proc, Jump, m_origin); |
| 918 | currentBlock->setSuccessors(FrequentedBlock(falseBlock)); |
| 919 | currentBlock = m_blockInsertionSet.insertBefore(m_block); |
| 920 | } |
| 921 | break; |
| 922 | case Wasm::TypeKind::Eqref: { |
| 923 | // Check for i31 or GC object |
| 924 | BasicBlock* checkObject = m_blockInsertionSet.insertBefore(m_block); |
| 925 | BasicBlock* endBlock = isCast ? continuation : trueBlock; |
| 926 | |
| 927 | // i31 check: !Below(value, NumberTag) && in range |
| 928 | // If Below(value, NumberTag) is TRUE, it's NOT an i31, go to checkObject |
| 929 | // If FALSE, it might be an i31, check the range |
| 930 | Value* belowNumberTag = currentBlock->appendNew<Value>(m_proc, Below, m_origin, reference, currentBlock->appendNew<Const64Value>(m_proc, m_origin, JSValue::NumberTag)); |
| 931 | BasicBlock* checkI31Range = m_blockInsertionSet.insertBefore(m_block); |
| 932 | currentBlock->appendNew<Value>(m_proc, Branch, m_origin, belowNumberTag); |
| 933 | currentBlock->setSuccessors(FrequentedBlock(checkObject), FrequentedBlock(checkI31Range)); |
| 934 | |
| 935 | Value* untagged = checkI31Range->appendNew<Value>(m_proc, Trunc, m_origin, reference); |
| 936 | Value* gtMax = checkI31Range->appendNew<Value>(m_proc, GreaterThan, m_origin, untagged, checkI31Range->appendNew<Const32Value>(m_proc, m_origin, Wasm::maxI31ref)); |
| 937 | Value* ltMin = checkI31Range->appendNew<Value>(m_proc, LessThan, m_origin, untagged, checkI31Range->appendNew<Const32Value>(m_proc, m_origin, Wasm::minI31ref)); |
| 938 | Value* outOfRange = checkI31Range->appendNew<Value>(m_proc, BitOr, m_origin, gtMax, ltMin); |
| 939 | checkI31Range->appendNew<Value>(m_proc, Branch, m_origin, outOfRange); |
| 940 | checkI31Range->setSuccessors(FrequentedBlock(checkObject), FrequentedBlock(endBlock)); |
| 941 | |
| 942 | // Check if it's a GC object |
| 943 | currentBlock = checkObject; |
| 944 | if (!definitelyIsCellOrNull) { |
| 945 | Value* notCellMask = currentBlock->appendNew<Value>(m_proc, BitAnd, m_origin, reference, currentBlock->appendNew<Const64Value>(m_proc, m_origin, JSValue::NotCellMask)); |
| 946 | currentBlock = emitCheckOrBranch(notCellMask, currentBlock); |
| 947 | } |
| 948 | if (!definitelyIsWasmGCObjectOrNull) { |
| 949 | Value* jsType = currentBlock->appendNew<MemoryValue>(m_proc, Load8Z, Int32, m_origin, reference, safeCast<int32_t>(JSCell::typeInfoTypeOffset())); |
| 950 | Value* notGCObject = currentBlock->appendNew<Value>(m_proc, NotEqual, m_origin, jsType, currentBlock->appendNew<Const32Value>(m_proc, m_origin, JSType::WebAssemblyGCObjectType)); |
| 951 | currentBlock = emitCheckOrBranch(notGCObject, currentBlock); |
| 952 | } |
| 953 | break; |
| 954 | } |
| 955 | case Wasm::TypeKind::I31ref: { |
| 956 | // Check !Below(value, NumberTag) && in range |
| 957 | // If Below(value, NumberTag) is TRUE → fail (not an i31) |
| 958 | Value* belowNumberTag = currentBlock->appendNew<Value>(m_proc, Below, m_origin, reference, |
| 959 | currentBlock->appendNew<Const64Value>(m_proc, m_origin, JSValue::NumberTag)); |
| 960 | currentBlock = emitCheckOrBranch(belowNumberTag, currentBlock); |
| 961 | |
| 962 | Value* untagged = currentBlock->appendNew<Value>(m_proc, Trunc, m_origin, reference); |
| 963 | Value* gtMax = currentBlock->appendNew<Value>(m_proc, GreaterThan, m_origin, untagged, currentBlock->appendNew<Const32Value>(m_proc, m_origin, Wasm::maxI31ref)); |
| 964 | currentBlock = emitCheckOrBranch(gtMax, currentBlock); |
| 965 | Value* ltMin = currentBlock->appendNew<Value>(m_proc, LessThan, m_origin, untagged, currentBlock->appendNew<Const32Value>(m_proc, m_origin, Wasm::minI31ref)); |
| 966 | currentBlock = emitCheckOrBranch(ltMin, currentBlock); |
| 967 | break; |
| 968 | } |
| 969 | case Wasm::TypeKind::Arrayref: |
| 970 | case Wasm::TypeKind::Structref: { |
| 971 | // Check cell, GC object type, and RTT kind |
| 972 | if (!definitelyIsCellOrNull) { |
| 973 | Value* notCellMask = currentBlock->appendNew<Value>(m_proc, BitAnd, m_origin, reference, currentBlock->appendNew<Const64Value>(m_proc, m_origin, JSValue::NotCellMask)); |
| 974 | currentBlock = emitCheckOrBranch(notCellMask, currentBlock); |
| 975 | } |
| 976 | if (!definitelyIsWasmGCObjectOrNull) { |
| 977 | Value* jsType = currentBlock->appendNew<MemoryValue>(m_proc, Load8Z, Int32, m_origin, reference, safeCast<int32_t>(JSCell::typeInfoTypeOffset())); |
| 978 | Value* notGCObject = currentBlock->appendNew<Value>(m_proc, NotEqual, m_origin, jsType, currentBlock->appendNew<Const32Value>(m_proc, m_origin, JSType::WebAssemblyGCObjectType)); |
| 979 | currentBlock = emitCheckOrBranch(notGCObject, currentBlock); |
| 980 | } |
| 981 | |
| 982 | // Load RTT and check kind |
| 983 | Value* structureID = currentBlock->appendNew<MemoryValue>(m_proc, Load, Int32, m_origin, reference, safeCast<int32_t>(JSCell::structureIDOffset())); |
| 984 | Value* structure = currentBlock->appendNew<Value>(m_proc, BitOr, m_origin, currentBlock->appendNew<Value>(m_proc, ZExt32, m_origin, structureID), currentBlock->appendNew<Const64Value>(m_proc, m_origin, structureIDBase())); |
| 985 | MemoryValue* rttLoad = currentBlock->appendNew<MemoryValue>(m_proc, Load, pointerType(), m_origin, structure, safeCast<int32_t>(WebAssemblyGCStructure::offsetOfRTT())); |
| 986 | rttLoad->setControlDependent(false); |
| 987 | MemoryValue* kindLoad = currentBlock->appendNew<MemoryValue>(m_proc, Load8Z, Int32, m_origin, rttLoad, safeCast<int32_t>(Wasm::RTT::offsetOfKind())); |
| 988 | kindLoad->setControlDependent(false); |
| 989 | Wasm::RTTKind expectedKind = static_cast<Wasm::TypeKind>(targetHeapType) == Wasm::TypeKind::Arrayref ? Wasm::RTTKind::Array : Wasm::RTTKind::Struct; |
| 990 | Value* wrongKind = currentBlock->appendNew<Value>(m_proc, NotEqual, m_origin, kindLoad, currentBlock->appendNew<Const32Value>(m_proc, m_origin, static_cast<uint8_t>(expectedKind))); |
| 991 | currentBlock = emitCheckOrBranch(wrongKind, currentBlock); |
| 992 | break; |
| 993 | } |
| 994 | default: |
| 995 | RELEASE_ASSERT_NOT_REACHED(); |
| 996 | } |
| 997 | } else { |
| 998 | // User-defined type: RTT comparison |
| 999 | ([&] { |
| 1000 | ASSERT(targetRTT)((void)0); |
| 1001 | |
| 1002 | Value* rtt; |
| 1003 | if (targetIsFunction) { |
| 1004 | // Function type: load RTT directly from WebAssemblyFunctionBase::offsetOfRTT() |
| 1005 | // The typechecker ensures valid types, so no cell/GC object check needed |
| 1006 | rtt = currentBlock->appendNew<MemoryValue>(m_proc, wrapTrapping(Load), pointerType(), m_origin, reference, safeCast<int32_t>(WebAssemblyFunctionBase::offsetOfRTT())); |
| 1007 | } else { |
| 1008 | // GC object (struct/array): check cell, GC object type, load from structure |
| 1009 | if (!definitelyIsCellOrNull) { |
| 1010 | Value* notCellMask = currentBlock->appendNew<Value>(m_proc, BitAnd, m_origin, reference, currentBlock->appendNew<Const64Value>(m_proc, m_origin, JSValue::NotCellMask)); |
| 1011 | currentBlock = emitCheckOrBranch(notCellMask, currentBlock); |
| 1012 | } |
| 1013 | if (!definitelyIsWasmGCObjectOrNull) { |
| 1014 | MemoryValue* jsType = currentBlock->appendNew<MemoryValue>(m_proc, wrapTrapping(Load8Z), Int32, m_origin, reference, safeCast<int32_t>(JSCell::typeInfoTypeOffset())); |
| 1015 | Value* notGCObject = currentBlock->appendNew<Value>(m_proc, NotEqual, m_origin, jsType, currentBlock->appendNew<Const32Value>(m_proc, m_origin, JSType::WebAssemblyGCObjectType)); |
| 1016 | currentBlock = emitCheckOrBranch(notGCObject, currentBlock); |
| 1017 | } |
| 1018 | |
| 1019 | // Load structure and RTT |
| 1020 | MemoryValue* structureIDLoad = currentBlock->appendNew<MemoryValue>(m_proc, wrapTrapping(Load), Int32, m_origin, reference, safeCast<int32_t>(JSCell::structureIDOffset())); |
| 1021 | Value* structure = currentBlock->appendNew<Value>(m_proc, BitOr, m_origin, currentBlock->appendNew<Value>(m_proc, ZExt32, m_origin, structureIDLoad), currentBlock->appendNew<Const64Value>(m_proc, m_origin, structureIDBase())); |
| 1022 | |
| 1023 | // Fast path: check inlined type display if display size is small enough |
| 1024 | if (targetRTT->displaySizeExcludingThis() < WebAssemblyGCStructure::inlinedTypeDisplaySize) { |
| 1025 | Value* targetRTTPointer = currentBlock->appendNew<ConstPtrValue>(m_proc, m_origin, std::bit_cast<uintptr_t>(targetRTT)); |
| 1026 | MemoryValue* pointer = currentBlock->appendNew<MemoryValue>(m_proc, Load, pointerType(), m_origin, structure, safeCast<int32_t>(WebAssemblyGCStructure::offsetOfInlinedTypeDisplay() + targetRTT->displaySizeExcludingThis() * sizeof(RefPtr<const Wasm::RTT>))); |
| 1027 | pointer->setReadsMutability(Mutability::Immutable); |
| 1028 | pointer->setControlDependent(false); |
| 1029 | Value* notEqual = currentBlock->appendNew<Value>(m_proc, NotEqual, m_origin, pointer, targetRTTPointer); |
| 1030 | currentBlock = emitCheckOrBranch(notEqual, currentBlock); |
| 1031 | return; |
| 1032 | } |
| 1033 | |
| 1034 | rtt = currentBlock->appendNew<MemoryValue>(m_proc, Load, pointerType(), m_origin, structure, safeCast<int32_t>(WebAssemblyGCStructure::offsetOfRTT())); |
| 1035 | rtt->as<MemoryValue>()->setControlDependent(false); |
| 1036 | } |
| 1037 | |
| 1038 | // Common RTT comparison path (for functions and GC objects with large display) |
| 1039 | { |
| 1040 | Value* targetRTTPointer = currentBlock->appendNew<ConstPtrValue>(m_proc, m_origin, std::bit_cast<uintptr_t>(targetRTT)); |
| 1041 | |
| 1042 | // Fast path: pointer equality |
| 1043 | BasicBlock* equalBlock = isCast ? continuation : trueBlock; |
| 1044 | BasicBlock* slowPath = m_blockInsertionSet.insertBefore(m_block); |
| 1045 | Value* isEqual = currentBlock->appendNew<Value>(m_proc, Equal, m_origin, rtt, targetRTTPointer); |
| 1046 | currentBlock->appendNew<Value>(m_proc, Branch, m_origin, isEqual); |
| 1047 | currentBlock->setSuccessors(FrequentedBlock(equalBlock), FrequentedBlock(slowPath)); |
| 1048 | |
| 1049 | currentBlock = slowPath; |
| 1050 | |
| 1051 | if (targetIsFinal) { |
| 1052 | // Final type: pointer equality is the only check needed |
| 1053 | if (isCast) { |
| 1054 | PatchpointValue* throwException = currentBlock->appendNew<PatchpointValue>(m_proc, Void, m_origin); |
| 1055 | throwException->setGenerator(castFailure); |
| 1056 | } else { |
| 1057 | currentBlock->appendNew<Value>(m_proc, Jump, m_origin); |
| 1058 | currentBlock->setSuccessors(FrequentedBlock(falseBlock)); |
| 1059 | currentBlock = m_blockInsertionSet.insertBefore(m_block); |
| 1060 | } |
| 1061 | } else { |
| 1062 | // Non-final type: check display hierarchy |
| 1063 | MemoryValue* displaySizeLoad = currentBlock->appendNew<MemoryValue>(m_proc, Load, Int32, m_origin, rtt, safeCast<int32_t>(Wasm::RTT::offsetOfDisplaySizeExcludingThis())); |
| 1064 | displaySizeLoad->setControlDependent(false); |
| 1065 | Value* tooSmall = currentBlock->appendNew<Value>(m_proc, BelowEqual, m_origin, displaySizeLoad, currentBlock->appendNew<Const32Value>(m_proc, m_origin, targetRTT->displaySizeExcludingThis())); |
| 1066 | currentBlock = emitCheckOrBranch(tooSmall, currentBlock); |
| 1067 | |
| 1068 | MemoryValue* pointer = currentBlock->appendNew<MemoryValue>(m_proc, Load, pointerType(), m_origin, rtt, safeCast<int32_t>(Wasm::RTT::offsetOfData() + targetRTT->displaySizeExcludingThis() * sizeof(RefPtr<const Wasm::RTT>))); |
| 1069 | pointer->setReadsMutability(Mutability::Immutable); |
| 1070 | pointer->setControlDependent(false); |
| 1071 | Value* notEqualToTarget = currentBlock->appendNew<Value>(m_proc, NotEqual, m_origin, pointer, targetRTTPointer); |
| 1072 | currentBlock = emitCheckOrBranch(notEqualToTarget, currentBlock); |
| 1073 | } |
| 1074 | } |
| 1075 | }()); |
| 1076 | } |
| 1077 | |
| 1078 | // Final jump to continuation |
| 1079 | if (isCast) { |
| 1080 | currentBlock->appendNew<Value>(m_proc, Jump, m_origin); |
| 1081 | currentBlock->setSuccessors(FrequentedBlock(continuation)); |
| 1082 | m_value->replaceWithIdentity(reference); |
| 1083 | } else { |
| 1084 | currentBlock->appendNew<Value>(m_proc, Jump, m_origin); |
| 1085 | currentBlock->setSuccessors(FrequentedBlock(trueBlock)); |
| 1086 | |
| 1087 | // Create phi for result |
| 1088 | int32_t trueValue = shouldNegate ? 0 : 1; |
| 1089 | int32_t falseValue = shouldNegate ? 1 : 0; |
| 1090 | |
| 1091 | UpsilonValue* trueUpsilon = trueBlock->appendNew<UpsilonValue>(m_proc, m_origin, trueBlock->appendNew<Const32Value>(m_proc, m_origin, trueValue)); |
| 1092 | trueBlock->appendNew<Value>(m_proc, Jump, m_origin); |
| 1093 | trueBlock->setSuccessors(FrequentedBlock(continuation)); |
| 1094 | |
| 1095 | UpsilonValue* falseUpsilon = falseBlock->appendNew<UpsilonValue>(m_proc, m_origin, falseBlock->appendNew<Const32Value>(m_proc, m_origin, falseValue)); |
| 1096 | falseBlock->appendNew<Value>(m_proc, Jump, m_origin); |
| 1097 | falseBlock->setSuccessors(FrequentedBlock(continuation)); |
| 1098 | |
| 1099 | Value* phi = m_insertionSet.insert<Value>(m_index, Phi, Int32, m_origin); |
| 1100 | trueUpsilon->setPhi(phi); |
| 1101 | falseUpsilon->setPhi(phi); |
| 1102 | |
| 1103 | m_value->replaceWithIdentity(phi); |
| 1104 | } |
| 1105 | |
| 1106 | before->updatePredecessorsAfter(); |
| 1107 | m_changed = true; |
| 1108 | break; |
| 1109 | } |
| 1110 | |
| 1111 | default: |
| 1112 | break; |
| 1113 | } |
| 1114 | } |
| 1115 | m_insertionSet.execute(m_block); |
| 1116 | } |
| 1117 | |
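| | // Lowers a SIMD comparison with no direct encoding by emitting its inverse
| | // and flipping every lane. Each lane of a vector compare is all-zeros or
| | // all-ones, so XOR with an all-ones constant acts as a lane-wise NOT; e.g.
| | // a NotEqual can be produced as VectorXor(VectorEqual(lhs, rhs), allOnes).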
| 1118 | void invertedComparisonByXor(Opcode opcodeToBeInverted, Value* lhs, Value* rhs) |
| 1119 | { |
| 1120 | v128_t allOnes; |
| 1121 | allOnes.u64x2[0] = 0xffffffffffffffff; |
| 1122 | allOnes.u64x2[1] = 0xffffffffffffffff; |
| 1123 | Value* allOnesConst = m_insertionSet.insert<Const128Value>(m_index, m_origin, allOnes); |
| 1124 | Value* compareResult = m_insertionSet.insert<SIMDValue>(m_index, m_origin, opcodeToBeInverted, B3::V128, m_value->as<SIMDValue>()->simdInfo(), lhs, rhs); |
| 1125 | Value* result = m_insertionSet.insert<SIMDValue>(m_index, m_origin, VectorXor, B3::V128, SIMDLane::v128, SIMDSignMode::None, compareResult, allOnesConst); |
| 1126 | m_value->replaceWithIdentity(result); |
| 1127 | m_changed = true; |
| 1128 | } |
| 1129 | |
| 1130 | #if USE(JSVALUE32_64)
| 1131 | Value* callDivModHelper(BasicBlock* block, Opcode nonChillOpcode, Value* num, Value* den) |
| 1132 | { |
| 1133 | Type type = num->type(); |
| 1134 | Value* functionAddress; |
| 1135 | if (nonChillOpcode == Div) { |
| 1136 | if (m_value->type() == Int64) |
| 1137 | functionAddress = block->appendNew<ConstPtrValue>(m_proc, m_origin, tagCFunction<OperationPtrTag>(Math::i64_div_s)); |
| 1138 | else |
| 1139 | functionAddress = block->appendNew<ConstPtrValue>(m_proc, m_origin, tagCFunction<OperationPtrTag>(Math::i32_div_s)); |
| 1140 | } else { |
| 1141 | if (m_value->type() == Int64) |
| 1142 | functionAddress = block->appendNew<ConstPtrValue>(m_proc, m_origin, tagCFunction<OperationPtrTag>(Math::i64_rem_s)); |
| 1143 | else |
| 1144 | functionAddress = block->appendNew<ConstPtrValue>(m_proc, m_origin, tagCFunction<OperationPtrTag>(Math::i32_rem_s)); |
| 1145 | } |
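| | // These helpers are deterministic, memory-free integer routines, which is
| | // why the call below can be modeled with Effects::none() and treated by B3
| | // like an ordinary value.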
| 1146 | return block->appendNew<CCallValue>(m_proc, type, m_origin, Effects::none(), functionAddress, num, den); |
| 1147 | } |
| 1148 | #else |
| 1149 | Value* callDivModHelper(BasicBlock*, Opcode, Value*, Value*) |
| 1150 | { |
| 1151 | RELEASE_ASSERT_NOT_REACHED();
| 1152 | } |
| 1153 | #endif |
| 1154 | void makeDivisionChill(Opcode nonChillOpcode) |
| 1155 | { |
| 1156 | ASSERT(nonChillOpcode == Div || nonChillOpcode == Mod);
| 1157 | |
| 1158 | // ARM64 division already has chill semantics (zero for divide-by-zero, wrap for INT_MIN / -1), so no lowering is needed.
| 1159 | if (isARM64()) |
| 1160 | return; |
| 1161 | |
| 1162 | // We implement "res = Div<Chill>/Mod<Chill>(num, den)" as follows: |
| 1163 | // |
| 1164 | // if (den + 1 <=_unsigned 1) { |
| 1165 | // if (!den) { |
| 1166 | // res = 0; |
| 1167 | // goto done; |
| 1168 | // } |
| 1169 | // if (num == INT_MIN) { // den is -1 here
| 1170 | // res = isDiv ? num : 0; |
| 1171 | // goto done; |
| 1172 | // } |
| 1173 | // } |
| 1174 | // res = num (/ or %) den;
| 1175 | // done: |
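| | //
| | // Concretely, for Int32 this yields the chill results
| | //     Div<Chill>(x, 0)          == 0
| | //     Mod<Chill>(x, 0)          == 0
| | //     Div<Chill>(INT32_MIN, -1) == INT32_MIN
| | //     Mod<Chill>(INT32_MIN, -1) == 0
| | // where a plain division would trap or be undefined.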
| 1176 | m_changed = true; |
| 1177 | |
| 1178 | Value* num = m_value->child(0); |
| 1179 | Value* den = m_value->child(1); |
| 1180 | |
| 1181 | Value* one = m_insertionSet.insertIntConstant(m_index, m_value, 1); |
| 1182 | Value* isDenOK = m_insertionSet.insert<Value>( |
| 1183 | m_index, Above, m_origin, |
| 1184 | m_insertionSet.insert<Value>(m_index, Add, m_origin, den, one), |
| 1185 | one); |
| 1186 | |
| 1187 | BasicBlock* before = m_blockInsertionSet.splitForward(m_block, m_index, &m_insertionSet); |
| 1188 | |
| 1189 | BasicBlock* normalDivCase = m_blockInsertionSet.insertBefore(m_block); |
| 1190 | BasicBlock* shadyDenCase = m_blockInsertionSet.insertBefore(m_block); |
| 1191 | BasicBlock* zeroDenCase = m_blockInsertionSet.insertBefore(m_block); |
| 1192 | BasicBlock* neg1DenCase = m_blockInsertionSet.insertBefore(m_block); |
| 1193 | BasicBlock* intMinCase = m_blockInsertionSet.insertBefore(m_block); |
| 1194 | |
| 1195 | before->replaceLastWithNew<Value>(m_proc, Branch, m_origin, isDenOK); |
| 1196 | before->setSuccessors( |
| 1197 | FrequentedBlock(normalDivCase, FrequencyClass::Normal), |
| 1198 | FrequentedBlock(shadyDenCase, FrequencyClass::Rare)); |
| 1199 | |
| 1200 | Value* innerResult; |
| 1201 | if (isARM_THUMB2() && (m_value->type() == Int64 || m_value->type() == Int32)) |
| 1202 | innerResult = callDivModHelper(normalDivCase, nonChillOpcode, num, den); |
| 1203 | else |
| 1204 | innerResult = normalDivCase->appendNew<Value>(m_proc, nonChillOpcode, m_origin, num, den); |
| 1205 | UpsilonValue* normalResult = normalDivCase->appendNew<UpsilonValue>( |
| 1206 | m_proc, m_origin, |
| 1207 | innerResult); |
| 1208 | normalDivCase->appendNew<Value>(m_proc, Jump, m_origin); |
| 1209 | normalDivCase->setSuccessors(FrequentedBlock(m_block)); |
| 1210 | |
| 1211 | shadyDenCase->appendNew<Value>(m_proc, Branch, m_origin, den); |
| 1212 | shadyDenCase->setSuccessors( |
| 1213 | FrequentedBlock(neg1DenCase, FrequencyClass::Normal), |
| 1214 | FrequentedBlock(zeroDenCase, FrequencyClass::Rare)); |
| 1215 | |
| 1216 | UpsilonValue* zeroResult = zeroDenCase->appendNew<UpsilonValue>( |
| 1217 | m_proc, m_origin, |
| 1218 | zeroDenCase->appendIntConstant(m_proc, m_value, 0)); |
| 1219 | zeroDenCase->appendNew<Value>(m_proc, Jump, m_origin); |
| 1220 | zeroDenCase->setSuccessors(FrequentedBlock(m_block)); |
| 1221 | |
| 1222 | int64_t badNumeratorConst = 0; |
| 1223 | switch (m_value->type().kind()) { |
| 1224 | case Int32: |
| 1225 | badNumeratorConst = std::numeric_limits<int32_t>::min(); |
| 1226 | break; |
| 1227 | case Int64: |
| 1228 | badNumeratorConst = std::numeric_limits<int64_t>::min(); |
| 1229 | break; |
| 1230 | default: |
| 1231 | ASSERT_NOT_REACHED();
| 1232 | badNumeratorConst = 0; |
| 1233 | } |
| 1234 | |
| 1235 | Value* badNumerator = |
| 1236 | neg1DenCase->appendIntConstant(m_proc, m_value, badNumeratorConst); |
| 1237 | |
| 1238 | neg1DenCase->appendNew<Value>( |
| 1239 | m_proc, Branch, m_origin, |
| 1240 | neg1DenCase->appendNew<Value>( |
| 1241 | m_proc, Equal, m_origin, num, badNumerator)); |
| 1242 | neg1DenCase->setSuccessors( |
| 1243 | FrequentedBlock(intMinCase, FrequencyClass::Rare), |
| 1244 | FrequentedBlock(normalDivCase, FrequencyClass::Normal)); |
| 1245 | |
| 1246 | Value* intMinResult = nonChillOpcode == Div ? badNumerator : intMinCase->appendIntConstant(m_proc, m_value, 0); |
| 1247 | UpsilonValue* intMinResultUpsilon = intMinCase->appendNew<UpsilonValue>( |
| 1248 | m_proc, m_origin, intMinResult); |
| 1249 | intMinCase->appendNew<Value>(m_proc, Jump, m_origin); |
| 1250 | intMinCase->setSuccessors(FrequentedBlock(m_block)); |
| 1251 | |
| 1252 | Value* phi = m_insertionSet.insert<Value>( |
| 1253 | m_index, Phi, m_value->type(), m_origin); |
| 1254 | normalResult->setPhi(phi); |
| 1255 | zeroResult->setPhi(phi); |
| 1256 | intMinResultUpsilon->setPhi(phi); |
| 1257 | |
| 1258 | m_value->replaceWithIdentity(phi); |
| 1259 | before->updatePredecessorsAfter(); |
| 1260 | } |
| 1261 | |
| 1262 | void recursivelyBuildSwitch( |
| 1263 | const Vector<SwitchCase>& cases, FrequentedBlock fallThrough, unsigned start, bool hardStart, |
| 1264 | unsigned end, BasicBlock* before) |
| 1265 | { |
| 1266 | Value* child = m_value->child(0); |
| 1267 | Type type = child->type(); |
| 1268 | |
| 1269 | // It's a good idea to use a table-based switch in some cases: the number of cases has to be |
| 1270 | // large enough and they have to be dense enough. This could probably be improved a lot. For |
| 1271 | // example, we could still use a jump table in cases where the inputs are sparse so long as we |
| 1272 | // shift off the uninteresting bits. On the other hand, it's not clear that this would |
| 1273 | // actually be any better than what we have done here and it's not clear that it would be |
| 1274 | // better than a binary switch. |
| 1275 | const unsigned minCasesForTable = 7; |
| 1276 | const unsigned densityLimit = 4; |
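| | // Worked instance of the heuristic below: 8 cases covering values 0..27
| | // give a density of (27 - 0 + 1) / 8 == 3 < densityLimit, so they get a
| | // 28-entry jump table; the same 8 cases spread over 0..99 give
| | // (99 + 1) / 8 == 12 and fall through to the binary switch instead.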
| 1277 | if (end - start >= minCasesForTable) { |
| 1278 | int64_t firstValue = cases[start].caseValue(); |
| 1279 | int64_t lastValue = cases[end - 1].caseValue(); |
| 1280 | if ((lastValue - firstValue + 1) / (end - start) < densityLimit) { |
| 1281 | BasicBlock* switchBlock = m_blockInsertionSet.insertAfter(m_block); |
| 1282 | Value* index = before->appendNew<Value>( |
| 1283 | m_proc, Sub, m_origin, child, |
| 1284 | before->appendIntConstant(m_proc, m_origin, type, firstValue)); |
| 1285 | before->appendNew<Value>( |
| 1286 | m_proc, Branch, m_origin, |
| 1287 | before->appendNew<Value>( |
| 1288 | m_proc, Above, m_origin, index, |
| 1289 | before->appendIntConstant(m_proc, m_origin, type, lastValue - firstValue))); |
| 1290 | before->setSuccessors(fallThrough, FrequentedBlock(switchBlock)); |
| 1291 | |
| 1292 | size_t tableSize = lastValue - firstValue + 1; |
| 1293 | |
| 1294 | if (index->type() != pointerType() && index->type() == Int32) |
| 1295 | index = switchBlock->appendNew<Value>(m_proc, ZExt32, m_origin, index); |
| 1296 | |
| 1297 | PatchpointValue* patchpoint = |
| 1298 | switchBlock->appendNew<PatchpointValue>(m_proc, Void, m_origin); |
| 1299 | |
| 1300 | // Even though this loads from the jump table, the jump table is immutable. For the |
| 1301 | // purpose of alias analysis, reading something immutable is like reading nothing. |
| 1302 | patchpoint->effects = Effects(); |
| 1303 | patchpoint->effects.terminal = true; |
| 1304 | |
| 1305 | patchpoint->appendSomeRegister(index); |
| 1306 | patchpoint->numGPScratchRegisters = 2; |
| 1307 | // Technically, we don't have to clobber macro registers on X86_64. This is probably |
| 1308 | // OK though. |
| 1309 | patchpoint->clobber(RegisterSetBuilder::macroClobberedGPRs()); |
| 1310 | |
| 1311 | BitVector handledIndices; |
| 1312 | for (unsigned i = start; i < end; ++i) { |
| 1313 | FrequentedBlock block = cases[i].target(); |
| 1314 | int64_t value = cases[i].caseValue(); |
| 1315 | switchBlock->appendSuccessor(block); |
| 1316 | size_t index = value - firstValue; |
| 1317 | ASSERT(!handledIndices.get(index));
| 1318 | handledIndices.set(index); |
| 1319 | } |
| 1320 | |
| 1321 | bool hasUnhandledIndex = false; |
| 1322 | for (unsigned i = 0; i < tableSize; ++i) { |
| 1323 | if (!handledIndices.get(i)) { |
| 1324 | hasUnhandledIndex = true; |
| 1325 | break; |
| 1326 | } |
| 1327 | } |
| 1328 | |
| 1329 | if (hasUnhandledIndex) |
| 1330 | switchBlock->appendSuccessor(fallThrough); |
| 1331 | |
| 1332 | patchpoint->setGenerator( |
| 1333 | [=] (CCallHelpers& jit, const StackmapGenerationParams& params) { |
| 1334 | AllowMacroScratchRegisterUsage allowScratch(jit); |
| 1335 | |
| 1336 | using JumpTableCodePtr = CodePtr<JSSwitchPtrTag>; |
| 1337 | JumpTableCodePtr* jumpTable = static_cast<JumpTableCodePtr*>( |
| 1338 | params.proc().addDataSection(sizeof(JumpTableCodePtr) * tableSize)); |
| 1339 | |
| 1340 | GPRReg index = params[0].gpr(); |
| 1341 | GPRReg scratch = params.gpScratch(0); |
| 1342 | |
| 1343 | jit.move(CCallHelpers::TrustedImmPtr(jumpTable), scratch); |
| 1344 | jit.loadPtr(CCallHelpers::BaseIndex(scratch, index, CCallHelpers::ScalePtr), scratch); |
| 1345 | jit.farJump(scratch, JSSwitchPtrTag); |
| 1346 | |
| 1347 | // These labels are guaranteed to be populated before either late paths or |
| 1348 | // link tasks run. |
| 1349 | Vector<Box<CCallHelpers::Label>> labels = params.successorLabels(); |
| 1350 | |
| 1351 | jit.addLinkTask( |
| 1352 | [=] (LinkBuffer& linkBuffer) { |
| 1353 | if (hasUnhandledIndex) { |
| 1354 | JumpTableCodePtr fallThrough = linkBuffer.locationOf<JSSwitchPtrTag>(*labels.last()); |
| 1355 | for (unsigned i = tableSize; i--;) |
| 1356 | jumpTable[i] = fallThrough; |
| 1357 | } |
| 1358 | |
| 1359 | unsigned labelIndex = 0; |
| 1360 | for (unsigned tableIndex : handledIndices) |
| 1361 | jumpTable[tableIndex] = linkBuffer.locationOf<JSSwitchPtrTag>(*labels[labelIndex++]); |
| 1362 | }); |
| 1363 | }); |
| 1364 | return; |
| 1365 | } |
| 1366 | } |
| 1367 | |
| 1368 | // See comments in jit/BinarySwitch.cpp for a justification of this algorithm. The only |
| 1369 | // thing we do differently is that we don't use randomness. |
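| | //
| | // For example, seven sparse cases {1, 2, 3, 10, 20, 30, 40} fail the
| | // density test above, so this path branches LessThan on the median value
| | // 10 and recurses, with each half eventually ending in a chain of at most
| | // leafThreshold Equal checks.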
| 1370 | |
| 1371 | const unsigned leafThreshold = 3; |
| 1372 | |
| 1373 | unsigned size = end - start; |
| 1374 | |
| 1375 | if (size <= leafThreshold) { |
| 1376 | bool allConsecutive = false; |
| 1377 | |
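| | // If neighbors on both sides of this leaf are adjacent case values (or the
| | // lower bound is hard) and the leaf itself is one consecutive run, the
| | // incoming value is pinned inside that run, so the last case needs no
| | // Equal check and gets an unconditional jump below.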
| 1378 | if ((hardStart || (start && cases[start - 1].caseValue() == cases[start].caseValue() - 1)) |
| 1379 | && end < cases.size() |
| 1380 | && cases[end - 1].caseValue() == cases[end].caseValue() - 1) { |
| 1381 | allConsecutive = true; |
| 1382 | for (unsigned i = 0; i < size - 1; ++i) { |
| 1383 | if (cases[start + i].caseValue() + 1 != cases[start + i + 1].caseValue()) { |
| 1384 | allConsecutive = false; |
| 1385 | break; |
| 1386 | } |
| 1387 | } |
| 1388 | } |
| 1389 | |
| 1390 | unsigned limit = allConsecutive ? size - 1 : size; |
| 1391 | |
| 1392 | for (unsigned i = 0; i < limit; ++i) { |
| 1393 | BasicBlock* nextCheck = m_blockInsertionSet.insertAfter(m_block); |
| 1394 | before->appendNew<Value>( |
| 1395 | m_proc, Branch, m_origin, |
| 1396 | before->appendNew<Value>( |
| 1397 | m_proc, Equal, m_origin, child, |
| 1398 | before->appendIntConstant( |
| 1399 | m_proc, m_origin, type, |
| 1400 | cases[start + i].caseValue()))); |
| 1401 | before->setSuccessors(cases[start + i].target(), FrequentedBlock(nextCheck)); |
| 1402 | |
| 1403 | before = nextCheck; |
| 1404 | } |
| 1405 | |
| 1406 | before->appendNew<Value>(m_proc, Jump, m_origin); |
| 1407 | if (allConsecutive) |
| 1408 | before->setSuccessors(cases[end - 1].target()); |
| 1409 | else |
| 1410 | before->setSuccessors(fallThrough); |
| 1411 | return; |
| 1412 | } |
| 1413 | |
| 1414 | unsigned medianIndex = std::midpoint(start, end); |
| 1415 | |
| 1416 | BasicBlock* left = m_blockInsertionSet.insertAfter(m_block); |
| 1417 | BasicBlock* right = m_blockInsertionSet.insertAfter(m_block); |
| 1418 | |
| 1419 | before->appendNew<Value>( |
| 1420 | m_proc, Branch, m_origin, |
| 1421 | before->appendNew<Value>( |
| 1422 | m_proc, LessThan, m_origin, child, |
| 1423 | before->appendIntConstant( |
| 1424 | m_proc, m_origin, type, |
| 1425 | cases[medianIndex].caseValue()))); |
| 1426 | before->setSuccessors(FrequentedBlock(left), FrequentedBlock(right)); |
| 1427 | |
| 1428 | recursivelyBuildSwitch(cases, fallThrough, start, hardStart, medianIndex, left); |
| 1429 | recursivelyBuildSwitch(cases, fallThrough, medianIndex, true, end, right); |
| 1430 | } |
| 1431 | |
| 1432 | Procedure& m_proc; |
| 1433 | BlockInsertionSet m_blockInsertionSet; |
| 1434 | InsertionSet m_insertionSet; |
| 1435 | UseCounts m_useCounts; |
| 1436 | BasicBlock* m_block; |
| 1437 | unsigned m_index; |
| 1438 | Value* m_value; |
| 1439 | Origin m_origin; |
| 1440 | bool m_changed { false }; |
| 1441 | }; |
| 1442 | |
| 1443 | } // anonymous namespace |
| 1444 | |
| 1445 | bool lowerMacros(Procedure& proc) |
| 1446 | { |
| 1447 | PhaseScope phaseScope(proc, "B3::lowerMacros"_s); |
| 1448 | LowerMacros lowerMacros(proc); |
| 1449 | return lowerMacros.run(); |
| 1450 | } |
| 1451 | |
| 1452 | } } // namespace JSC::B3 |
| 1453 | |
| 1454 | WTF_ALLOW_UNSAFE_BUFFER_USAGE_END
| 1455 | |
| 1456 | #endif // ENABLE(B3_JIT) |