Skip to content

Instantly share code, notes, and snippets.

Show Gist options

  • Select an option

  • Save alexcrichton/65e557ea5cd6dd915a6528010986281e to your computer and use it in GitHub Desktop.
;; Run test case
test run
set opt_level=speed
target x86_64
;; NOTE(review): this appears to be a fuzzer-generated Cranelift CLIF runtest
;; (run on x86_64 at opt_level=speed). The dense `vN -> vM` alias lines, the
;; chains of redundant selects/smax/rotl, and the branches whose arms all
;; target the same block are characteristic of generated input. The exact
;; instruction order, value numbering, and stack maps are load-bearing for
;; reproducing whatever issue this case exercises, so nothing below is
;; reordered or simplified — only comments are added.
function %my_fn(i128 sext, i16x8, f64, f32x4, i64x2, i128 sext, i32 sext, i64 sext, f32, i16 uext, i64x2, i16 sext, i32x4, i8 sext, i8x16, i8x16) -> i128 sext, i16x8, f64, f32x4, f32x4, i64x2, i8 uext, i8, f64x2, i32 uext, f32, i128 uext, i32 uext, i64 sext, f32, i16 uext fast {
;; ss0/ss1 are over-aligned (align = 1024 with sizes 65/62) scratch buffers
;; that get zero-filled piecewise in block0; ss2..ss13 hold values spilled
;; across calls — they are the slots named in the stack_map annotations below.
ss0 = explicit_slot 65, align = 1024
ss1 = explicit_slot 62, align = 1024
ss2 = explicit_slot 16, align = 16
ss3 = explicit_slot 16, align = 16
ss4 = explicit_slot 16, align = 16
ss5 = explicit_slot 16, align = 16
ss6 = explicit_slot 8, align = 8
ss7 = explicit_slot 4, align = 4
ss8 = explicit_slot 2, align = 2
ss9 = explicit_slot 16, align = 16
ss10 = explicit_slot 16, align = 16
ss11 = explicit_slot 16, align = 16
ss12 = explicit_slot 8, align = 8
ss13 = explicit_slot 16, align = 16
;; External helpers: f32/f64 ceil, floor, and trunc, declared pairwise.
;; sig0/sig2/sig4 and sig1/sig3/sig5 are intentionally identical duplicates.
sig0 = (f32) -> f32 system_v
sig1 = (f64) -> f64 system_v
sig2 = (f32) -> f32 system_v
sig3 = (f64) -> f64 system_v
sig4 = (f32) -> f32 system_v
sig5 = (f64) -> f64 system_v
fn0 = %CeilF32 sig0
fn1 = %CeilF64 sig1
fn2 = %FloorF32 sig2
fn3 = %FloorF64 sig3
fn4 = %TruncF32 sig4
fn5 = %TruncF64 sig5
const0 = 0xe38ea0e180e0e0e0e0e0e04161414141
const1 = 0x8ea0e18081e38ea0e18081e38ea0e180
const2 = 0xe38ea0e18081e38ea0e18081e38ea0e1
block0(v0: i128, v1: i16x8, v2: f64, v3: f32x4, v4: i64x2, v5: i128, v6: i32, v7: i64, v8: f32, v9: i16, v10: i64x2, v11: i16, v12: i32x4, v13: i8, v14: i8x16, v15: i8x16):
;; The `vN -> vM` lines throughout are value aliases, not instructions:
;; they give later blocks alternate names for earlier SSA values.
v161 -> v1
v199 -> v1
v684 -> v3
v707 -> v3
v708 -> v3
v223 -> v5
v680 -> v5
v703 -> v5
v704 -> v5
v172 -> v8
v208 -> v9
v563 -> v15
v648 -> v15
v694 -> v15
v695 -> v15
;; Spill some arguments so they are addressable by the stack_maps below.
stack_store v3, ss2
stack_store v9, ss8
stack_store v15, ss10
v87 = vconst.i8x16 const0
v88 = vconst.f64x2 const1
v600 -> v88
v700 -> v88
v701 -> v88
stack_store v88, ss11 ; v88 = const1
v89 = f64const -0x1.1e38ea0e18081p-1015
v163 -> v89
v201 -> v89
v90 = iconst.i64 -9087262801474321949
v91 = iconst.i64 -8169281781998842208
v92 = iconcat v91, v90 ; v91 = -8169281781998842208, v90 = -9087262801474321949
v193 -> v92
v574 -> v92
v696 -> v92
v697 -> v92
v93 = vconst.f32x4 const2
v683 -> v93
v705 -> v93
v706 -> v93
stack_store v93, ss3 ; v93 = const2
v94 = vconst.f64x2 const1
v686 -> v94
v709 -> v94
v710 -> v94
stack_store v94, ss4 ; v94 = const1
v95 = iconst.i64 -9087262801474321949
v96 = iconst.i64 -8169281781998842208
v97 = iconcat v96, v95 ; v96 = -8169281781998842208, v95 = -9087262801474321949
v98 = vconst.f32x4 const2
v99 = vconst.f64x2 const1
v100 = f64const -0x1.1e38ea0e18080p-1015
v101 = iconst.i64 -8169281642955767325
v102 = iconst.i64 -8169281781998842208
v103 = iconcat v102, v101 ; v102 = -8169281781998842208, v101 = -8169281642955767325
v104 = f64const -0x1.1e38ea0e18081p-1015
v105 = iconst.i64 -9087262801474321949
v106 = iconst.i64 -8169281781998842208
v107 = iconcat v106, v105 ; v106 = -8169281781998842208, v105 = -9087262801474321949
v218 -> v107
v594 -> v107
v698 -> v107
v699 -> v107
v108 = vconst.f32x4 const2
;; Zero-fill ss1 (62 bytes) and ss0 (65 bytes) piecewise with
;; i128/i64/i32/i16/i8 stores of zero.
v109 = iconst.i8 0
v110 = iconst.i16 0
v111 = iconst.i32 0
v112 = iconst.i64 0
v113 = uextend.i128 v112 ; v112 = 0
v114 = stack_addr.i64 ss1
store notrap heap v113, v114
v115 = stack_addr.i64 ss1+16
store notrap heap v113, v115
v116 = stack_addr.i64 ss1+32
store notrap heap v113, v116
v117 = stack_addr.i64 ss1+48
store notrap heap v112, v117 ; v112 = 0
v118 = stack_addr.i64 ss1+56
store notrap heap v111, v118 ; v111 = 0
v119 = stack_addr.i64 ss1+60
store notrap heap v110, v119 ; v110 = 0
v120 = stack_addr.i64 ss0
store notrap heap v113, v120
v121 = stack_addr.i64 ss0+16
store notrap heap v113, v121
v122 = stack_addr.i64 ss0+32
store notrap heap v113, v122
v123 = stack_addr.i64 ss0+48
store notrap heap v113, v123
v124 = stack_addr.i64 ss0+64
store notrap heap v109, v124 ; v109 = 0
;; Fuzzer filler: chains of splat/sshr/select/uwiden_low whose results are
;; mostly unused; v129 reloads the zeroed first word of ss0.
v125 = uwiden_low v1
v126 = select v7, v89, v2 ; v89 = -0x1.1e38ea0e18081p-1015
v127 = splat.i64x2 v7
v156 -> v127
v128 = stack_addr.i64 ss0
v129 = load.i32 heap v128
v159 -> v129
v130 = select_spectre_guard v129, v13, v13
v131 = splat.i64x2 v7
v132 = sshr v7, v129
v133 = uwiden_low v1
v134 = select v132, v89, v89 ; v89 = -0x1.1e38ea0e18081p-1015, v89 = -0x1.1e38ea0e18081p-1015
v164 -> v134
v135 = uextend.i128 v129
v136 = select_spectre_guard v129, v130, v130
v137 = splat.i64x2 v132
v138 = sshr v132, v129
v139 = uwiden_low v1
v140 = select v138, v89, v134 ; v89 = -0x1.1e38ea0e18081p-1015
v141 = splat.i64x2 v138
v142 = sshr v138, v129
v143 = uwiden_low v1
v144 = select v142, v89, v134 ; v89 = -0x1.1e38ea0e18081p-1015
v145 = select v142, v89, v134 ; v89 = -0x1.1e38ea0e18081p-1015
v146 = splat.i64x2 v142
v147 = sshr v142, v129
v148 = uwiden_low v1
v149 = select v147, v89, v134 ; v89 = -0x1.1e38ea0e18081p-1015
v150 = splat.i64x2 v147
v151 = uextend.i128 v129
v152 = select_spectre_guard v129, v136, v136
v209 -> v152
v289 -> v152
v153 = splat.i64x2 v147
v154 = sshr v147, v129
v158 -> v154
v155 = uwiden_low v1
;; Both arms target block1, so this brif is effectively an unconditional jump.
brif v92, block1, block1
block1 cold:
v157 = bxor_not.i64x2 v156, v156
v160 = sshr.i64 v158, v159
v162 = uwiden_low.i16x8 v161
v165 = select.f64 v160, v163, v164 ; v163 = -0x1.1e38ea0e18081p-1015
v166 = select.f64 v160, v163, v164 ; v163 = -0x1.1e38ea0e18081p-1015
v167 = splat.i64x2 v160
v168 = sshr v160, v159
v169 = uwiden_low.i16x8 v161
v170 = select.f64 v168, v163, v164 ; v163 = -0x1.1e38ea0e18081p-1015
v171 = splat.i64x2 v168
;; NaN-canonicalization idiom (repeated several times in this function):
;; compute x+x, then bitselect a canonical +NaN into any lane that compares
;; unordered with itself, and extract lane 0.
v718 = fadd.f32 v172, v172
v719 = f32const +NaN
v720 = scalar_to_vector.f32x4 v719 ; v719 = +NaN
v721 = scalar_to_vector.f32x4 v718
v722 = fcmp uno v721, v721
v723 = bitcast.f32x4 v722
v724 = bitselect v723, v720, v721
v173 = extractlane v724, 0
v204 -> v173
v174 = bxor_not.i64x2 v156, v171
v194 -> v174
v290 -> v174
stack_store v174, ss9
v175 = fcvt_to_uint_sat.i32 v173
v197 -> v175
v176 = select.f64 v168, v163, v170 ; v163 = -0x1.1e38ea0e18081p-1015
v202 -> v176
v248 -> v176
v298 -> v176
stack_store v176, ss12
v177 = uwiden_low.i16x8 v161
v717 = stack_load.f64 ss12
v178 = select.f64 v168, v163, v717 ; v163 = -0x1.1e38ea0e18081p-1015
v179 = splat.i64x2 v168
v180 = sshr v168, v175
v181 = uwiden_low.i16x8 v161
v716 = stack_load.f64 ss12
v182 = select.f64 v180, v163, v716 ; v163 = -0x1.1e38ea0e18081p-1015
v183 = splat.i64x2 v180
v184 = extractlane.i16x8 v161, 6
v221 -> v184
v288 -> v184
v692 -> v184
v715 = stack_load.f64 ss12
v185 = select.f64 v180, v163, v715 ; v163 = -0x1.1e38ea0e18081p-1015
v186 = splat.i64x2 v180
v187 = sshr v180, v175
v188 = uwiden_low.i16x8 v161
v714 = stack_load.f64 ss12
v189 = select.f64 v187, v163, v714 ; v163 = -0x1.1e38ea0e18081p-1015
v190 = splat.i64x2 v187
v191 = sshr v187, v175
v196 -> v191
v192 = uwiden_low.i16x8 v161
;; Again both arms are the same block — effectively an unconditional jump.
brif.i128 v193, block2, block2
block2 cold:
v195 = bxor_not.i64x2 v194, v194
v198 = sshr.i64 v196, v197
v246 -> v198
v200 = uwiden_low.i16x8 v199
v203 = select.f64 v198, v201, v202 ; v201 = -0x1.1e38ea0e18081p-1015
v247 -> v203
v297 -> v203
v330 -> v203
v374 -> v203
v412 -> v203
v459 -> v203
v489 -> v203
v535 -> v203
;; Indirect call to %CeilF32; the stack_map names the spilled values that
;; must be kept live across the call (slot and offset per value).
v205 = func_addr.i64 fn0
v206 = call_indirect sig0, v205(v204), stack_map=[f32x4 @ ss2+0, i16 @ ss8+0, i8x16 @ ss10+0, f64x2 @ ss11+0, f32x4 @ ss3+0, f64x2 @ ss4+0, i64x2 @ ss9+0, f64 @ ss12+0]
v364 -> v206
v691 -> v206
v207 = select.i64x2 v198, v194, v195
v210 = rotl.i16 v208, v209
v211 = rotl v210, v209
v212 = rotl v211, v209
v213 = rotl v212, v209
v214 = rotl v213, v209
v215 = rotl v214, v209
v216 = rotl v215, v209
v217 = rotl v216, v209
v219 = select.i16x8 v218, v199, v199
v220 = select v218, v219, v219
v222 = select v221, v220, v220
v224 = select v223, v222, v222
v225 = select v218, v224, v224
v253 -> v225
v295 -> v225
v336 -> v225
v372 -> v225
v410 -> v225
v226 = popcnt v217
v227 = rotl v226, v209
;; Nine identical misaligned i32 loads from ss1+53 (addr ss1+50, offset +3);
;; ss1 was zero-filled in block0, but keeping each load is intentional.
v228 = stack_addr.i64 ss1+50
v229 = load.i32 heap v228+3
v230 = stack_addr.i64 ss1+50
v231 = load.i32 heap v230+3
v232 = stack_addr.i64 ss1+50
v233 = load.i32 heap v232+3
v234 = stack_addr.i64 ss1+50
v235 = load.i32 heap v234+3
v236 = stack_addr.i64 ss1+50
v237 = load.i32 heap v236+3
v238 = stack_addr.i64 ss1+50
v239 = load.i32 heap v238+3
v240 = stack_addr.i64 ss1+50
v241 = load.i32 heap v240+3
v242 = stack_addr.i64 ss1+50
v243 = load.i32 heap v242+3
v244 = stack_addr.i64 ss1+50
v245 = load.i32 heap v244+3
v251 -> v245
brif.i8 v209, block3(v207), block4(v221, v209, v209, v209, v209, v209, v209, v209, v209, v209, v198, v245)
block3(v16: i64x2):
v249 = select.f64 v246, v247, v248
v250 = splat.i64x2 v246
v252 = sshr.i64 v246, v251
v254 = uwiden_low.i16x8 v253
v255 = select.f64 v252, v247, v248
v256 = splat.i64x2 v252
v257 = sshr v252, v251
v258 = swiden_high v254
v259 = uwiden_low.i16x8 v253
v260 = select.f64 v257, v247, v248
v261 = splat.i64x2 v257
v262 = sshr v257, v251
v263 = uwiden_low.i16x8 v253
v264 = select.f64 v262, v247, v248
v265 = splat.i64x2 v262
v266 = sshr v262, v251
v267 = uwiden_low.i16x8 v253
v268 = select.f64 v266, v247, v248
v269 = splat.i64x2 v266
v270 = sshr v266, v251
v271 = uwiden_low.i16x8 v253
v272 = select.f64 v270, v247, v248
v273 = splat.i64x2 v270
v274 = sshr v270, v251
v275 = uwiden_low.i16x8 v253
;; atomic_cas on ss0+4: compare-and-swap with identical expected/replacement
;; (v251, v251), so the stored value never changes.
v276 = stack_addr.i64 ss0+4
v277 = iadd_imm v276, 0
v278 = atomic_cas.i32 v277, v251, v251
v279 = stack_addr.i64 ss1+50
v280 = load.i32 heap v279+3
v281 = stack_addr.i64 ss1+50
v282 = load.i32 heap v281+3
v283 = stack_addr.i64 ss0+36
v284 = load.i32 heap v283+12
v285 = splat.i64x2 v274
v286 = sshr v274, v284
v287 = uwiden_low.i16x8 v253
jump block4(v288, v289, v289, v289, v289, v289, v289, v289, v289, v289, v286, v284)
block4(v17: i16, v18: i8, v19: i8, v20: i8, v21: i8, v22: i8, v23: i8, v24: i8, v25: i8, v26: i8, v292: i64, v293: i32):
v632 -> v17
v650 -> v17
v673 -> v17
v702 -> v17
stack_store v17, ss8
v291 = bxor_not.i64x2 v290, v290
v294 = sshr v292, v293
v296 = uwiden_low.i16x8 v295
v299 = select.f64 v294, v297, v298
v300 = splat.i64x2 v294
v301 = sshr v294, v293
v302 = uwiden_low.i16x8 v295
v303 = select.f64 v301, v297, v298
v304 = splat.i64x2 v301
v305 = sshr v301, v293
v306 = uwiden_low.i16x8 v295
v307 = bxor_not.i64x2 v290, v290
v308 = stack_addr.i64 ss1+50
v309 = load.i32 heap v308+3
v310 = stack_addr.i64 ss1+50
v311 = load.i32 heap v310+2
v312 = sshr v305, v311
v313 = uwiden_low.i16x8 v295
v314 = select.f64 v312, v297, v298
v315 = splat.i64x2 v312
v316 = sshr v312, v311
v317 = uwiden_low.i16x8 v295
v318 = select.f64 v316, v297, v298
v319 = splat.i64x2 v316
v367 -> v319
v320 = stack_addr.i64 ss0
v321 = load.i32 heap v320
v334 -> v321
v370 -> v321
v408 -> v321
v322 = select_spectre_guard v321, v26, v26
v323 = splat.i64x2 v316
v324 = sshr v316, v321
v329 -> v324
v325 = uwiden_low.i16x8 v295
v326 = select.f64 v324, v297, v297
v331 -> v326
v375 -> v326
v413 -> v326
v327 = uextend.i128 v321
v328 = select_spectre_guard v321, v322, v322
;; All fourteen block5 parameters receive the same value (v328).
jump block5(v328, v328, v328, v328, v328, v328, v328, v328, v328, v328, v328, v328, v328, v328)
block5(v27: i8, v28: i8, v29: i8, v30: i8, v31: i8, v32: i8, v33: i8, v34: i8, v35: i8, v36: i8, v37: i8, v38: i8, v39: i8, v40: i8) cold:
v332 = select.f64 v329, v330, v331
v333 = splat.i64x2 v329
v335 = sshr.i64 v329, v334
v337 = uwiden_low.i16x8 v336
v338 = select.f64 v335, v330, v331
v339 = splat.i64x2 v335
v340 = sshr v335, v334
v341 = splat.i64x2 v340
v342 = sshr v340, v334
v343 = uwiden_low.i16x8 v336
v344 = select.f64 v342, v330, v331
v345 = splat.i64x2 v342
v346 = sshr v342, v334
v347 = uwiden_low.i16x8 v336
v348 = select_spectre_guard v40, v40, v40
v349 = uextend.i128 v334
v350 = select_spectre_guard v334, v348, v348
v351 = splat.i64x2 v346
v352 = sshr v346, v334
v353 = uwiden_low.i16x8 v336
v354 = select.f64 v352, v330, v331
v355 = splat.i64x2 v352
v356 = sshr v352, v334
v357 = uwiden_low.i16x8 v336
v358 = select.f64 v356, v330, v331
v359 = select.f64 v356, v330, v331
v360 = splat.i64x2 v356
v361 = sshr v356, v334
v368 -> v361
v362 = uwiden_low.i16x8 v336
v363 = select.f64 v361, v330, v331
;; NaN-canonicalization idiom again, on v364 (result of the first CeilF32 call).
v725 = fadd.f32 v364, v364
v726 = f32const +NaN
v727 = scalar_to_vector.f32x4 v726 ; v726 = +NaN
v728 = scalar_to_vector.f32x4 v725
v729 = fcmp uno v728, v728
v730 = bitcast.f32x4 v729
v731 = bitselect v730, v727, v728
v365 = extractlane v731, 0
v493 -> v365
v693 -> v365
v366 = select v361, v358, v331
brif v350, block6(v350, v350, v350, v350, v350, v367, v350, v350, v350, v350, v350, v350, v350, v350), block7(v350, v350, v350, v350, v350, v350, v350, v350, v350, v350, v350, v350, v350, v350, v367, v361, v184)
block6(v41: i8, v42: i8, v43: i8, v44: i8, v45: i8, v46: i64x2, v47: i8, v48: i8, v49: i8, v50: i8, v51: i8, v52: i8, v53: i8, v54: i8) cold:
v369 = splat.i64x2 v368
v371 = sshr.i64 v368, v370
v373 = uwiden_low.i16x8 v372
v376 = select.f64 v371, v374, v375
v377 = splat.i64x2 v371
v378 = sshr v371, v370
v379 = uwiden_low.i16x8 v372
v380 = select.f64 v378, v374, v375
v381 = splat.i64x2 v378
v382 = extractlane.i16x8 v372, 6
v383 = select.f64 v378, v374, v375
v384 = splat.i64x2 v378
v385 = sshr v378, v370
v386 = uwiden_low.i16x8 v372
v387 = select.f64 v385, v374, v375
v388 = splat.i64x2 v385
v389 = sshr v385, v370
v390 = uwiden_low.i16x8 v372
v391 = select.f64 v389, v374, v375
v392 = splat.i64x2 v389
v393 = sshr v389, v370
v394 = uwiden_low.i16x8 v372
v395 = select.f64 v393, v374, v375
v396 = splat.i64x2 v393
v397 = sshr v393, v370
v398 = uwiden_low.i16x8 v372
v399 = select.f64 v397, v374, v375
v400 = splat.i64x2 v397
v401 = sshr v397, v370
v402 = uwiden_low.i16x8 v372
v403 = select.f64 v401, v374, v375
v404 = splat.i64x2 v401
jump block7(v54, v54, v54, v54, v54, v54, v54, v54, v54, v54, v54, v54, v54, v54, v46, v401, v382)
block7(v55: i8, v56: i8, v57: i8, v58: i8, v59: i8, v60: i8, v61: i8, v62: i8, v63: i8, v64: i8, v65: i8, v66: i8, v67: i8, v68: i8, v405: i64x2, v407: i64, v422: i16) cold:
v450 -> v405
v406 = bxor_not v405, v405
v409 = sshr v407, v408
v411 = uwiden_low.i16x8 v410
v414 = select.f64 v409, v412, v413
v460 -> v414
v415 = uwiden_low.i16x8 v410
v416 = select.f64 v409, v412, v414
v417 = stack_addr.i64 ss1+28
store.i32 notrap heap v408, v417
v418 = splat.i64x2 v409
v419 = sshr v409, v408
v420 = uwiden_low.i16x8 v410
v421 = select.f64 v419, v412, v414
v423 = ushr.i16x8 v410, v422
v448 -> v423
v486 -> v423
v532 -> v423
v424 = stack_addr.i64 ss1+50
v425 = load.i32 heap v424+3
v426 = stack_addr.i64 ss1+50
v427 = load.i32 heap v426+2
v428 = stack_addr.i64 ss1+50
v429 = load.i32 heap v428+3
v430 = stack_addr.i64 ss1+50
v431 = load.i32 heap v430+3
v432 = stack_addr.i64 ss1+50
v433 = load.i32 heap v432+3
v434 = stack_addr.i64 ss0+36
v435 = load.i32 heap v434+12
v436 = splat.i64x2 v419
v437 = sshr v419, v435
v438 = uwiden_low v423
v439 = select.f64 v437, v412, v414
v440 = splat.i64x2 v437
v441 = sshr v437, v435
v442 = uwiden_low v423
v443 = select.f64 v441, v412, v414
v444 = splat.i64x2 v441
v445 = sshr v441, v435
v456 -> v445
v446 = uwiden_low v423
v447 = select.f64 v445, v412, v414
;; Both arms are block8 with identical arguments — unconditional in effect.
brif v445, block8(v68, v68, v68, v68, v68, v68, v68, v68, v68, v68, v68, v68, v68, v422), block8(v68, v68, v68, v68, v68, v68, v68, v68, v68, v68, v68, v68, v68, v422)
block8(v69: i8, v70: i8, v71: i8, v72: i8, v73: i8, v74: i8, v75: i8, v76: i8, v77: i8, v78: i8, v79: i8, v80: i8, v81: i8, v82: i16) cold:
v580 -> v82
v633 -> v82
v449 = uwiden_low.i16x8 v448
v451 = bxor_not.i64x2 v450, v450
v452 = stack_addr.i64 ss1+50
v453 = load.i32 heap v452+3
v454 = stack_addr.i64 ss1+50
v455 = load.i32 heap v454+2
v457 = sshr.i64 v456, v455
v458 = uwiden_low.i16x8 v448
v461 = select.f64 v457, v459, v460
v462 = splat.i64x2 v457
v463 = sshr v457, v455
v464 = uwiden_low.i16x8 v448
v465 = select.f64 v463, v459, v460
v466 = splat.i64x2 v463
v495 -> v466
v467 = stack_addr.i64 ss0
v468 = load.i32 heap v467
v469 = select_spectre_guard v468, v81, v81
v470 = splat.i64x2 v463
v471 = sshr v463, v468
v472 = uwiden_low.i16x8 v448
v473 = select.f64 v471, v459, v459
v490 -> v473
v474 = uextend.i128 v468
v475 = select_spectre_guard v468, v469, v469
v476 = splat.i64x2 v471
v477 = sshr v471, v468
v478 = uwiden_low.i16x8 v448
v479 = select.f64 v477, v459, v473
v480 = splat.i64x2 v477
v481 = sshr v477, v468
v488 -> v481
v482 = uwiden_low.i16x8 v448
v483 = select.f64 v481, v459, v473
v506 -> v483
v602 -> v483
stack_store v483, ss12
v484 = select.f64 v481, v459, v473
v485 = splat.i64x2 v481
;; Both arms are block9 with the same argument — unconditional in effect.
brif v475, block9(v475), block9(v475)
block9(v83: i8):
v487 = uwiden_low.i16x8 v486
v491 = select.f64 v488, v489, v490
v492 = splat.i64x2 v488
;; NaN-canonicalization idiom on v493 (the v365 canonicalized value).
v732 = fadd.f32 v493, v493
v733 = f32const +NaN
v734 = scalar_to_vector.f32x4 v733 ; v733 = +NaN
v735 = scalar_to_vector.f32x4 v732
v736 = fcmp uno v735, v735
v737 = bitcast.f32x4 v736
v738 = bitselect v737, v734, v735
v494 = extractlane v738, 0
v496 = bxor_not.i64x2 v495, v492
v497 = fcvt_to_uint_sat.i32 v494
v498 = select.f64 v488, v489, v490
v499 = splat.i64x2 v488
v500 = sshr.i64 v488, v497
v501 = uwiden_low.i16x8 v486
v502 = select.f64 v500, v489, v490
v503 = splat.i64x2 v500
v504 = sshr v500, v497
v505 = uwiden_low.i16x8 v486
v507 = select.f64 v504, v506, v490
v508 = splat.i64x2 v504
v509 = sshr v504, v497
v510 = uwiden_low.i16x8 v486
v511 = select.f64 v509, v489, v490
v512 = splat.i64x2 v509
v513 = sshr v509, v497
v514 = uwiden_low.i16x8 v486
v515 = select.f64 v513, v489, v490
v516 = splat.i64x2 v513
v517 = sshr v513, v497
v534 -> v517
v518 = uwiden_low.i16x8 v486
v519 = select.f64 v517, v490, v490
v536 -> v519
stack_store v519, ss6
;; NaN-canonicalization idiom on v494.
v739 = fadd v494, v494
v740 = f32const +NaN
v741 = scalar_to_vector.f32x4 v740 ; v740 = +NaN
v742 = scalar_to_vector.f32x4 v739
v743 = fcmp uno v742, v742
v744 = bitcast.f32x4 v743
v745 = bitselect v744, v741, v742
v520 = extractlane v745, 0
v576 -> v520
v521 = bxor_not v496, v516
v645 -> v521
stack_store v521, ss9
v522 = fcvt_to_uint_sat.i32 v520
jump block10
block10 cold:
v523 = stack_addr.i64 ss1+50
v524 = load.i32 heap v523+3
v525 = stack_addr.i64 ss1+50
v526 = load.i32 heap v525+3
v527 = stack_addr.i64 ss1+50
v528 = load.i32 heap v527+3
v529 = stack_addr.i64 ss1+50
v530 = load.i32 heap v529+3
v531 = sextend.i128 v530
stack_store v531, ss13
v533 = uwiden_low.i16x8 v532
v537 = select.f64 v534, v535, v536
v538 = splat.i64x2 v534
v539 = sshr.i64 v534, v530
v540 = uwiden_low.i16x8 v532
v541 = select.f64 v539, v535, v536
v542 = splat.i64x2 v539
v543 = sshr v539, v530
v544 = uwiden_low.i16x8 v532
v545 = select.f64 v543, v535, v536
v546 = splat.i64x2 v543
v547 = sshr v543, v530
v548 = uwiden_low.i16x8 v532
v549 = select.f64 v547, v535, v536
v550 = splat.i64x2 v547
v551 = sshr v547, v530
v552 = uwiden_low.i16x8 v532
v553 = select.f64 v551, v535, v536
v554 = splat.i64x2 v551
v555 = sshr v551, v530
v556 = uwiden_low.i16x8 v532
v557 = select.f64 v555, v535, v536
v558 = splat.i64x2 v555
v559 = sshr v555, v530
v560 = uwiden_low.i16x8 v532
v561 = select.f64 v559, v535, v536
v562 = splat.i64x2 v559
v564 = bnot.i8x16 v563
v647 -> v564
v565 = select.f64 v559, v535, v536
v566 = splat.i64x2 v559
v567 = sshr v559, v530
v568 = uwiden_low.i16x8 v532
v569 = select.f64 v567, v535, v536
v570 = splat.i64x2 v567
v571 = sshr v567, v530
v661 -> v571
v689 -> v571
v572 = uwiden_low.i16x8 v532
v573 = select.f64 v571, v535, v536
v575 = rotr.i128 v574, v571
v688 -> v575
;; Direct calls to FloorF32 / CeilF32 / TruncF64 / CeilF64, each with a
;; stack_map keeping the spilled slots live across the call.
v577 = call fn2(v576), stack_map=[f32x4 @ ss2+0, i8x16 @ ss10+0, i16 @ ss8+0, f64x2 @ ss11+0, f32x4 @ ss3+0, f64x2 @ ss4+0, f64 @ ss12+0, f64 @ ss6+0, i64x2 @ ss9+0, i128 @ ss13+0]
v578 = select.f64 v571, v535, v536
v579 = splat.i64x2 v571
v662 -> v579
v685 -> v579
stack_store v579, ss5
v713 = stack_load.i128 ss13
v581 = sshr v713, v580
v582 = stack_addr.i64 ss1+50
v583 = load.i32 heap v582+3
v584 = stack_addr.i64 ss1+50
v585 = load.i32 heap v584+3
v586 = stack_addr.i64 ss1+50
v587 = load.i32 heap v586+3
v588 = stack_addr.i64 ss1+50
v589 = load.i32 heap v588+7
v627 -> v589
v641 -> v589
stack_store v589, ss7
v590 = uwiden_low.i16x8 v532
v591 = select.f64 v571, v535, v536
v656 -> v591
v682 -> v591
stack_store v591, ss6
v592 = call fn0(v577), stack_map=[f32x4 @ ss2+0, i8x16 @ ss10+0, i16 @ ss8+0, f64x2 @ ss11+0, f32x4 @ ss3+0, f64x2 @ ss4+0, f64 @ ss12+0, i64x2 @ ss9+0, i64x2 @ ss5+0, i32 @ ss7+0, f64 @ ss6+0]
v593 = call fn5(v535), stack_map=[f32x4 @ ss2+0, i8x16 @ ss10+0, i16 @ ss8+0, f64x2 @ ss11+0, f32x4 @ ss3+0, f64x2 @ ss4+0, f64 @ ss12+0, i64x2 @ ss9+0, i64x2 @ ss5+0, i32 @ ss7+0, f64 @ ss6+0]
v595 = select.i16x8 v594, v532, v532
v596 = select v574, v595, v595
v652 -> v596
v597 = smax v590, v590
v598 = smax v597, v597
v599 = smax v598, v598
v609 -> v599
;; ss11 is overwritten here (it held v88/const1 earlier); later stack_maps
;; record it as i32x4 accordingly.
stack_store v599, ss11
v712 = stack_load.f64 ss6
v601 = insertlane.f64x2 v600, v712, 0 ; v600 = const1
v603 = call fn1(v602), stack_map=[f32x4 @ ss2+0, i8x16 @ ss10+0, i16 @ ss8+0, f32x4 @ ss3+0, f64x2 @ ss4+0, i64x2 @ ss9+0, i64x2 @ ss5+0, i32 @ ss7+0, f64 @ ss6+0, i32x4 @ ss11+0]
v604 = bxor v592, v592
v606 -> v604
v605 = ushr v581, v571
jump block11
block11 cold:
;; Two rounds of the NaN-canonicalization idiom; v606 is v592 ^ v592 (zero).
v746 = fadd.f32 v606, v606
v747 = f32const +NaN
v748 = scalar_to_vector.f32x4 v747 ; v747 = +NaN
v749 = scalar_to_vector.f32x4 v746
v750 = fcmp uno v749, v749
v751 = bitcast.f32x4 v750
v752 = bitselect v751, v748, v749
v607 = extractlane v752, 0
v753 = fadd v607, v607
v754 = f32const +NaN
v755 = scalar_to_vector.f32x4 v754 ; v754 = +NaN
v756 = scalar_to_vector.f32x4 v753
v757 = fcmp uno v756, v756
v758 = bitcast.f32x4 v757
v759 = bitselect v758, v755, v756
v608 = extractlane v759, 0
;; Fuzzer filler: idempotent smax/select_spectre_guard chains (x op x == x).
v610 = umin.i32x4 v609, v609
v611 = smax v610, v610
v612 = smax v611, v611
v613 = smax v612, v612
v614 = smax v613, v613
v615 = smax v614, v614
v616 = smax v615, v615
v617 = smax v616, v616
v618 = smax v617, v617
v619 = smax v618, v618
v620 = smax v619, v619
v621 = smax v620, v620
v622 = smax v621, v621
v623 = smax v622, v622
v624 = smax v623, v623
v625 = smax v624, v624
v626 = smax v625, v625
v628 = select_spectre_guard v627, v608, v608
v629 = select_spectre_guard v627, v628, v628
v630 = select_spectre_guard v627, v629, v629
v631 = select_spectre_guard v627, v630, v630
v634 = bxor.i16 v632, v633
v672 -> v634
v635 = select_spectre_guard v627, v631, v631
v636 = select_spectre_guard v627, v635, v635
v637 = select_spectre_guard v627, v636, v636
v638 = select_spectre_guard v627, v637, v637
v639 = select_spectre_guard v627, v638, v638
v640 = select_spectre_guard v627, v639, v639
v642 -> v640
jump block12
block12 cold:
v643 = select_spectre_guard.f32 v641, v642, v642
v644 = select_spectre_guard v641, v643, v643
v646 = vany_true.i64x2 v645
v649 = iadd.i8x16 v647, v648
v651 = select.i32 v650, v641, v641
v674 -> v651
v676 -> v651
v687 -> v651
stack_store v651, ss7
v653 = ishl.i16x8 v652, v646
v681 -> v653
v654 = func_addr.i64 fn0
v655 = call_indirect sig0, v654(v644), stack_map=[f32x4 @ ss2+0, i16 @ ss8+0, f32x4 @ ss3+0, f64x2 @ ss4+0, i64x2 @ ss9+0, i64x2 @ ss5+0, f64 @ ss6+0, i32 @ ss7+0]
v657 = func_addr.i64 fn1
v658 = call_indirect sig1, v657(v656), stack_map=[f32x4 @ ss2+0, i16 @ ss8+0, f32x4 @ ss3+0, f64x2 @ ss4+0, i64x2 @ ss9+0, i64x2 @ ss5+0, f64 @ ss6+0, i32 @ ss7+0]
v659 = func_addr.i64 fn0
v660 = call_indirect sig0, v659(v655), stack_map=[f32x4 @ ss2+0, i16 @ ss8+0, f32x4 @ ss3+0, f64x2 @ ss4+0, i64x2 @ ss9+0, i64x2 @ ss5+0, f64 @ ss6+0, i32 @ ss7+0]
v663 = select.i64x2 v661, v645, v662
v664 = func_addr.i64 fn0
v665 = call_indirect sig0, v664(v660), stack_map=[f32x4 @ ss2+0, i16 @ ss8+0, f32x4 @ ss3+0, f64x2 @ ss4+0, i64x2 @ ss5+0, f64 @ ss6+0, i32 @ ss7+0]
v666 -> v665
v711 = stack_load.i32 ss7
;; Default arm and all three table arms go to block13 with the same argument.
br_table v711, block13(v646), [block13(v646), block13(v646), block13(v646)]
block13(v84: i8) cold:
v675 -> v84
v667 = func_addr.i64 fn0
v668 = call_indirect sig0, v667(v666), stack_map=[f32x4 @ ss2+0, i16 @ ss8+0, f32x4 @ ss3+0, f64x2 @ ss4+0, i64x2 @ ss5+0, f64 @ ss6+0, i32 @ ss7+0]
v669 -> v668
jump block14
block14:
v670 = func_addr.i64 fn0
v671 = call_indirect sig0, v670(v669), stack_map=[f32x4 @ ss2+0, i16 @ ss8+0, f32x4 @ ss3+0, f64x2 @ ss4+0, i64x2 @ ss5+0, f64 @ ss6+0, i32 @ ss7+0]
v677 -> v671
;; All arms reach block15; the argument differs (v672 vs v673) per arm.
br_table v674, block15(v673), [block15(v672), block15(v673), block15(v672)]
block15(v85: i16):
v690 -> v85
br_table v676, block16(v675), [block16(v675), block16(v675), block16(v675)]
block16(v86: i8):
v678 = func_addr.i64 fn0
v679 = call_indirect sig0, v678(v677), stack_map=[f32x4 @ ss2+0, f32x4 @ ss3+0, f64x2 @ ss4+0, i64x2 @ ss5+0, f64 @ ss6+0, i32 @ ss7+0]
return v680, v681, v682, v683, v684, v685, v86, v86, v686, v687, v679, v688, v687, v689, v679, v690 ; v683 = const2, v686 = const1
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment