@@ -130,53 +130,34 @@ impl<'ll, 'tcx> Deref for Builder<'_, 'll, 'tcx> {
     }
 }
 
-#[derive(Clone, Copy, PartialEq, Eq)]
-enum I128MathOp {
-    Add,
-    Sub,
-    Mul,
-    And,
-    Or,
-    Xor,
-    Shl,
-    LShr,
-    AShr,
-    Neg,
-    Not,
-    UDiv,
-    SDiv,
-    URem,
-    SRem,
-}
-
-macro_rules! math_builder_methods {
-    ($($name:ident($($arg:ident),*) => $spec:tt),+ $(,)?) => {
-        $(math_builder_methods!(@expand $name($($arg),*) => $spec);)+
-    };
-    (@expand $name:ident($($arg:ident),*) => ($llvm_capi:ident, $i128_op:expr)) => {
-        fn $name(&mut self, $($arg: &'ll Value),*) -> &'ll Value {
-            let args_slice: &[&'ll Value] = &[$($arg),*];
-            if let Some(result) = self.try_emulate_i128($i128_op, args_slice) {
-                return result;
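+/// Generates the integer builder methods. Each `self.name(args) => LLVMBuildX => { .. }`
+/// entry becomes a method that runs its i128-emulation block when any operand is 128 bits
+/// wide and otherwise forwards to the corresponding LLVM C API builder function.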
+macro_rules! imath_builder_methods {
+    ($($self_:ident.$name:ident($($arg:ident),*) => $llvm_capi:ident => $op:block)+) => {
+        $(fn $name(&mut $self_, $($arg: &'ll Value),*) -> &'ll Value {
+            // Dispatch to i128 emulation when any operand is 128 bits wide.
+            if $($self_.is_i128($arg))||*
+            $op
+            else {
+                unsafe {
+                    trace!("binary expr: {:?} with args {:?}", stringify!($name), [$($arg),*]);
+                    llvm::$llvm_capi($self_.llbuilder, $($arg,)* UNNAMED)
+                }
             }
+        })+
+    }
+}

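+/// Generates the floating-point builder methods, which always forward straight to the
+/// LLVM C API builder function; no i128 emulation is needed for float ops.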
+macro_rules! fmath_builder_methods {
+    ($($self_:ident.$name:ident($($arg:ident),*) => $llvm_capi:ident)+) => {
+        $(fn $name(&mut $self_, $($arg: &'ll Value),*) -> &'ll Value {
             unsafe {
                 trace!("binary expr: {:?} with args {:?}", stringify!($name), [$($arg),*]);
-                llvm::$llvm_capi(self.llbuilder, $($arg,)* UNNAMED)
-            }
-        }
-    };
-    (@expand $name:ident($($arg:ident),*) => $llvm_capi:ident) => {
-        fn $name(&mut self, $($arg: &'ll Value),*) -> &'ll Value {
-            unsafe {
-                trace!("binary expr: {:?} with args {:?}", stringify!($name), [$($arg),*]);
-                llvm::$llvm_capi(self.llbuilder, $($arg,)* UNNAMED)
+                llvm::$llvm_capi($self_.llbuilder, $($arg,)* UNNAMED)
             }
-        }
-    };
+        })+
+    }
 }
 
-macro_rules! set_math_builder_methods {
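+/// Like `fmath_builder_methods!`, but additionally applies the given fast-math setter
+/// (e.g. `LLVMRustSetFastMath`) to the emitted instruction.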
+macro_rules! set_fmath_builder_methods {
     ($($name:ident($($arg:ident),*) => ($llvm_capi:ident, $llvm_set_math:ident)),+ $(,)?) => {
         $(fn $name(&mut self, $($arg: &'ll Value),*) -> &'ll Value {
             unsafe {
@@ -329,39 +310,56 @@ impl<'ll, 'tcx, 'a> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         }
     }
 
-    math_builder_methods! {
-        add(a, b) => (LLVMBuildAdd, I128MathOp::Add),
-        fadd(a, b) => LLVMBuildFAdd,
-        sub(a, b) => (LLVMBuildSub, I128MathOp::Sub),
-        fsub(a, b) => LLVMBuildFSub,
-        mul(a, b) => (LLVMBuildMul, I128MathOp::Mul),
-        fmul(a, b) => LLVMBuildFMul,
-        udiv(a, b) => (LLVMBuildUDiv, I128MathOp::UDiv),
-        exactudiv(a, b) => (LLVMBuildExactUDiv, I128MathOp::UDiv),
-        sdiv(a, b) => (LLVMBuildSDiv, I128MathOp::SDiv),
-        exactsdiv(a, b) => (LLVMBuildExactSDiv, I128MathOp::SDiv),
-        fdiv(a, b) => LLVMBuildFDiv,
-        urem(a, b) => (LLVMBuildURem, I128MathOp::URem),
-        srem(a, b) => (LLVMBuildSRem, I128MathOp::SRem),
-        frem(a, b) => LLVMBuildFRem,
-        shl(a, b) => (LLVMBuildShl, I128MathOp::Shl),
-        lshr(a, b) => (LLVMBuildLShr, I128MathOp::LShr),
-        ashr(a, b) => (LLVMBuildAShr, I128MathOp::AShr),
-        and(a, b) => (LLVMBuildAnd, I128MathOp::And),
-        or(a, b) => (LLVMBuildOr, I128MathOp::Or),
-        xor(a, b) => (LLVMBuildXor, I128MathOp::Xor),
-        neg(x) => (LLVMBuildNeg, I128MathOp::Neg),
-        fneg(x) => LLVMBuildFNeg,
-        not(x) => (LLVMBuildNot, I128MathOp::Not),
-        unchecked_sadd(x, y) => (LLVMBuildNSWAdd, I128MathOp::Add),
-        unchecked_uadd(x, y) => (LLVMBuildNUWAdd, I128MathOp::Add),
-        unchecked_ssub(x, y) => (LLVMBuildNSWSub, I128MathOp::Sub),
-        unchecked_usub(x, y) => (LLVMBuildNUWSub, I128MathOp::Sub),
-        unchecked_smul(x, y) => (LLVMBuildNSWMul, I128MathOp::Mul),
-        unchecked_umul(x, y) => (LLVMBuildNUWMul, I128MathOp::Mul),
-    }
-
-    set_math_builder_methods! {
+    imath_builder_methods! {
+        self.add(a, b) => LLVMBuildAdd => { self.emulate_i128_add(a, b) }
+        self.unchecked_uadd(a, b) => LLVMBuildNUWAdd => { self.emulate_i128_add(a, b) }
+        self.unchecked_sadd(a, b) => LLVMBuildNSWAdd => { self.emulate_i128_add(a, b) }
+
+        self.sub(a, b) => LLVMBuildSub => { self.emulate_i128_sub(a, b) }
+        self.unchecked_usub(a, b) => LLVMBuildNUWSub => { self.emulate_i128_sub(a, b) }
+        self.unchecked_ssub(a, b) => LLVMBuildNSWSub => { self.emulate_i128_sub(a, b) }
+
+        self.mul(a, b) => LLVMBuildMul => { self.emulate_i128_mul(a, b) }
+        self.unchecked_umul(a, b) => LLVMBuildNUWMul => { self.emulate_i128_mul(a, b) }
+        self.unchecked_smul(a, b) => LLVMBuildNSWMul => { self.emulate_i128_mul(a, b) }
+
+        self.udiv(a, b) => LLVMBuildUDiv => { self.emulate_i128_udiv(a, b) }
+        self.exactudiv(a, b) => LLVMBuildExactUDiv => { self.emulate_i128_udiv(a, b) }
+        self.sdiv(a, b) => LLVMBuildSDiv => { self.emulate_i128_sdiv(a, b) }
+        self.exactsdiv(a, b) => LLVMBuildExactSDiv => { self.emulate_i128_sdiv(a, b) }
+        self.urem(a, b) => LLVMBuildURem => { self.emulate_i128_urem(a, b) }
+        self.srem(a, b) => LLVMBuildSRem => { self.emulate_i128_srem(a, b) }
+
+        self.shl(a, b) => LLVMBuildShl => {
+            let b = self.trunc(b, self.type_i32());
+            self.emulate_i128_shl(a, b)
+        }
+        self.lshr(a, b) => LLVMBuildLShr => {
+            let b = self.trunc(b, self.type_i32());
+            self.emulate_i128_lshr(a, b)
+        }
+        self.ashr(a, b) => LLVMBuildAShr => {
+            let b = self.trunc(b, self.type_i32());
+            self.emulate_i128_ashr(a, b)
+        }
+
+        self.and(a, b) => LLVMBuildAnd => { self.emulate_i128_and(a, b) }
+        self.or(a, b) => LLVMBuildOr => { self.emulate_i128_or(a, b) }
+        self.xor(a, b) => LLVMBuildXor => { self.emulate_i128_xor(a, b) }
+        self.neg(a) => LLVMBuildNeg => { self.emulate_i128_neg(a) }
+        self.not(a) => LLVMBuildNot => { self.emulate_i128_not(a) }
+    }
+
+    fmath_builder_methods! {
+        self.fadd(a, b) => LLVMBuildFAdd
+        self.fsub(a, b) => LLVMBuildFSub
+        self.fmul(a, b) => LLVMBuildFMul
+        self.fdiv(a, b) => LLVMBuildFDiv
+        self.frem(a, b) => LLVMBuildFRem
+        self.fneg(a) => LLVMBuildFNeg
+    }
+
+    set_fmath_builder_methods! {
         fadd_fast(x, y) => (LLVMBuildFAdd, LLVMRustSetFastMath),
         fsub_fast(x, y) => (LLVMBuildFSub, LLVMRustSetFastMath),
         fmul_fast(x, y) => (LLVMBuildFMul, LLVMRustSetFastMath),
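For reference, an entry such as `self.add(a, b) => LLVMBuildAdd => { self.emulate_i128_add(a, b) }` expands to roughly the method below; the `$op` block serves as the body of the `if`, and `Value`, `llvm`, `UNNAMED`, and `trace!` come from the surrounding crate. This is a sketch of the macro expansion, not code from this commit:

```rust
fn add(&mut self, a: &'ll Value, b: &'ll Value) -> &'ll Value {
    // Dispatch to i128 emulation when any operand is 128 bits wide.
    if self.is_i128(a) || self.is_i128(b) {
        self.emulate_i128_add(a, b)
    } else {
        unsafe {
            trace!("binary expr: {:?} with args {:?}", stringify!(add), [a, b]);
            llvm::LLVMBuildAdd(self.llbuilder, a, b, UNNAMED)
        }
    }
}
```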
@@ -1399,78 +1397,6 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
         unsafe { llvm::LLVMBuildBitCast(self.llbuilder, vec, self.type_i128(), UNNAMED) }
     }
 
-    fn try_emulate_i128(&mut self, op: I128MathOp, args: &[&'ll Value]) -> Option<&'ll Value> {
-        if !args.iter().any(|&arg| self.is_i128(arg)) {
-            return None;
-        }
-
-        Some(match op {
-            I128MathOp::Add => {
-                assert_eq!(args.len(), 2);
-                self.emulate_i128_add(args[0], args[1])
-            }
-            I128MathOp::Sub => {
-                assert_eq!(args.len(), 2);
-                self.emulate_i128_sub(args[0], args[1])
-            }
-            I128MathOp::Mul => {
-                assert_eq!(args.len(), 2);
-                self.emulate_i128_mul(args[0], args[1])
-            }
-            I128MathOp::And => {
-                assert_eq!(args.len(), 2);
-                self.emulate_i128_and(args[0], args[1])
-            }
-            I128MathOp::Or => {
-                assert_eq!(args.len(), 2);
-                self.emulate_i128_or(args[0], args[1])
-            }
-            I128MathOp::Xor => {
-                assert_eq!(args.len(), 2);
-                self.emulate_i128_xor(args[0], args[1])
-            }
-            I128MathOp::Shl => {
-                assert_eq!(args.len(), 2);
-                let shift_amt = self.trunc(args[1], self.type_i32());
-                self.emulate_i128_shl(args[0], shift_amt)
-            }
-            I128MathOp::LShr => {
-                assert_eq!(args.len(), 2);
-                let shift_amt = self.trunc(args[1], self.type_i32());
-                self.emulate_i128_lshr(args[0], shift_amt)
-            }
-            I128MathOp::AShr => {
-                assert_eq!(args.len(), 2);
-                let shift_amt = self.trunc(args[1], self.type_i32());
-                self.emulate_i128_ashr(args[0], shift_amt)
-            }
-            I128MathOp::Neg => {
-                assert_eq!(args.len(), 1);
-                self.emulate_i128_neg(args[0])
-            }
-            I128MathOp::Not => {
-                assert_eq!(args.len(), 1);
-                self.emulate_i128_not(args[0])
-            }
-            I128MathOp::UDiv => {
-                assert_eq!(args.len(), 2);
-                self.emulate_i128_udiv(args[0], args[1])
-            }
-            I128MathOp::SDiv => {
-                assert_eq!(args.len(), 2);
-                self.emulate_i128_sdiv(args[0], args[1])
-            }
-            I128MathOp::URem => {
-                assert_eq!(args.len(), 2);
-                self.emulate_i128_urem(args[0], args[1])
-            }
-            I128MathOp::SRem => {
-                assert_eq!(args.len(), 2);
-                self.emulate_i128_srem(args[0], args[1])
-            }
-        })
-    }
-
     // Multiply two u64 values and return the full 128-bit product as (lo, hi)
     fn mul_u64_to_u128(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> (&'ll Value, &'ll Value) {
         let i64_ty = self.type_i64();
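The body of `mul_u64_to_u128` is truncated in this hunk. As a rough model of the technique its comment describes, the sketch below computes a full 64×64 → 128-bit product in plain Rust by splitting each operand into 32-bit halves; the real helper instead emits LLVM IR on `&'ll Value`s, and its exact carry handling is not shown here, so treat this as an illustrative assumption rather than the commit's implementation:

```rust
/// Schoolbook 64x64 -> 128-bit multiply returning (lo, hi).
/// Hypothetical plain-Rust model, not the builder method from this commit.
fn mul_u64_wide(lhs: u64, rhs: u64) -> (u64, u64) {
    // Split both operands into 32-bit halves so every partial product fits in a u64.
    let (a_lo, a_hi) = (lhs & 0xffff_ffff, lhs >> 32);
    let (b_lo, b_hi) = (rhs & 0xffff_ffff, rhs >> 32);

    let ll = a_lo * b_lo; // contributes to bits 0..64
    let lh = a_lo * b_hi; // contributes to bits 32..96
    let hl = a_hi * b_lo; // contributes to bits 32..96
    let hh = a_hi * b_hi; // contributes to bits 64..128

    // Sum the two cross terms, remembering the carry out of bit 63.
    let (mid, mid_carry) = lh.overflowing_add(hl);
    // Low word: ll plus the low half of the cross terms, tracking the carry into the high word.
    let lo = ll.wrapping_add(mid << 32);
    let lo_carry = (lo < ll) as u64;
    // High word: hh plus the high half of the cross terms and both carries.
    let hi = hh + (mid >> 32) + ((mid_carry as u64) << 32) + lo_carry;
    (lo, hi)
}

fn main() {
    let (lo, hi) = mul_u64_wide(u64::MAX, 3);
    // Cross-check against native 128-bit arithmetic.
    assert_eq!(((hi as u128) << 64) | lo as u128, u64::MAX as u128 * 3);
    println!("lo = {lo:#x}, hi = {hi:#x}");
}
```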