@@ -431,9 +431,10 @@ public void EvalLossSequence()
             var y = FloatTensor.RandomN(new long[] { 64, 10 }, device: "cpu:0");

             var eval = seq.Forward(x);
-            var loss = NN.LossFunction.MSE(eval, y, NN.Reduction.Sum);
+            var loss = NN.LossFunction.MSE(NN.Reduction.Sum);
+            var output = loss(eval, y);

-            var result = loss.DataItem<float>();
+            var result = output.DataItem<float>();
             Assert.IsNotNull(result);
         }

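The pattern in this hunk recurs throughout the commit: the eager form NN.LossFunction.MSE(eval, y, reduction) becomes a factory call that takes only the reduction and returns a function, which is then applied to (input, target). The factory's return type is not visible in these hunks; presumably it is a delegate along the lines of Func<TorchTensor, TorchTensor, TorchTensor>, but that is an assumption. A minimal sketch of the usage shape, condensed from the hunk above:

    // The factory captures the reduction mode once...
    var mse = NN.LossFunction.MSE(NN.Reduction.Sum);

    // ...and is then applied to any (input, target) pair. With Sum
    // reduction the applied result is a scalar tensor, so its value
    // can be read back with DataItem<float>().
    var output = mse(eval, y);
    var result = output.DataItem<float>();
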
@@ -444,9 +445,9 @@ public void TestPoissonNLLLoss()
             using (TorchTensor target = FloatTensor.From(new float[] { 1f, 2f, 3f }))
             {
                 var componentWiseLoss = ((TorchTensor)input.Exp()) - target * input;
-                Assert.IsTrue(componentWiseLoss.Equal(NN.LossFunction.PoissonNLL(input, target, reduction: NN.Reduction.None)));
-                Assert.IsTrue(componentWiseLoss.Sum().Equal(NN.LossFunction.PoissonNLL(input, target, reduction: NN.Reduction.Sum)));
-                Assert.IsTrue(componentWiseLoss.Mean().Equal(NN.LossFunction.PoissonNLL(input, target, reduction: NN.Reduction.Mean)));
+                Assert.IsTrue(componentWiseLoss.Equal(NN.LossFunction.PoissonNLL(reduction: NN.Reduction.None)(input, target)));
+                Assert.IsTrue(componentWiseLoss.Sum().Equal(NN.LossFunction.PoissonNLL(reduction: NN.Reduction.Sum)(input, target)));
+                Assert.IsTrue(componentWiseLoss.Mean().Equal(NN.LossFunction.PoissonNLL(reduction: NN.Reduction.Mean)(input, target)));
             }
         }

@@ -456,7 +457,7 @@ public void TestPoissonNLLLoss2()
             using (TorchTensor input = FloatTensor.Random(new long[] { 5, 2 }))
             using (TorchTensor target = FloatTensor.Random(new long[] { 5, 2 }))
             {
-                Assert.IsNotNull(NN.LossFunction.PoissonNLL(input, target, true, true));
+                Assert.IsNotNull(NN.LossFunction.PoissonNLL(true, true)(input, target));
             }
         }

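For reference, with log-space input the per-element Poisson negative log-likelihood (without the optional Stirling term) is exp(input_i) - target_i * input_i, which is exactly the componentWiseLoss tensor built in the previous hunk; the three assertions check the None, Sum, and Mean reductions against it. The two positional booleans in PoissonNLL(true, true) are not named in this diff; by analogy with PyTorch's PoissonNLLLoss they would be log_input and full, but that is an inference rather than something these hunks confirm.
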
@@ -481,11 +482,12 @@ public void TestBackward()
             var y = FloatTensor.RandomN(new long[] { 64, 10 }, device: "cpu:0");

             var eval = seq.Forward(x);
-            var loss = NN.LossFunction.MSE(eval, y, NN.Reduction.None);
+            var loss = NN.LossFunction.MSE(NN.Reduction.None);
+            var output = loss(eval, y);

             seq.ZeroGrad();

-            loss.Backward();
+            output.Backward();
         }

         [TestMethod]
@@ -499,11 +501,12 @@ public void TestGettingParameters()
             var y = FloatTensor.RandomN(new long[] { 64, 10 }, device: "cpu:0");

             var eval = seq.Forward(x);
-            var loss = NN.LossFunction.MSE(eval, y, NN.Reduction.None);
+            var loss = NN.LossFunction.MSE(NN.Reduction.None);
+            var output = loss(eval, y);

             seq.ZeroGrad();

-            loss.Backward();
+            output.Backward();

             foreach (var parm in seq.Parameters())
             {
@@ -522,11 +525,12 @@ public void TestGrad()
             var y = FloatTensor.RandomN(new long[] { 64, 10 }, device: "cpu:0");

             var eval = seq.Forward(x);
-            var loss = NN.LossFunction.MSE(eval, y, NN.Reduction.None);
+            var loss = NN.LossFunction.MSE(NN.Reduction.None);
+            var output = loss(eval, y);

             seq.ZeroGrad();

-            loss.Backward();
+            output.Backward();

             foreach (var parm in seq.Parameters())
             {
@@ -658,19 +662,20 @@ public void TestTraining()

             float learning_rate = 0.00004f;
             float prevLoss = float.MaxValue;
+            var loss = NN.LossFunction.MSE(NN.Reduction.Sum);

             for (int i = 0; i < 10; i++)
             {
                 var eval = seq.Forward(x);
-                var loss = NN.LossFunction.MSE(eval, y, NN.Reduction.Sum);
-                var lossVal = loss.DataItem<float>();
+                var output = loss(eval, y);
+                var lossVal = output.DataItem<float>();

                 Assert.IsTrue(lossVal < prevLoss);
                 prevLoss = lossVal;

                 seq.ZeroGrad();

-                loss.Backward();
+                output.Backward();

                 using (var noGrad = new AutoGradMode(false))
                 {
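The substantive difference in TestTraining is that the MSE factory is hoisted out of the training loop: the loss function is constructed once, and each iteration only applies it and backpropagates through the applied result. Condensed from the hunk above, with the parameter update elided as in the diff:

    var loss = NN.LossFunction.MSE(NN.Reduction.Sum);   // constructed once

    for (int i = 0; i < 10; i++)
    {
        var eval = seq.Forward(x);
        var output = loss(eval, y);                     // apply the cached function
        var lossVal = output.DataItem<float>();         // expected to fall each pass

        seq.ZeroGrad();
        output.Backward();                              // gradients via the applied result
        // ... SGD-style update inside using (var noGrad = new AutoGradMode(false)) ...
    }
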
@@ -715,19 +720,20 @@ public void TestTrainingAdam()
             double learning_rate = 0.00004f;
             float prevLoss = float.MaxValue;
             var optimizer = NN.Optimizer.Adam(seq.Parameters(), learning_rate);
+            var loss = NN.LossFunction.MSE(NN.Reduction.Sum);

             for (int i = 0; i < 10; i++)
             {
                 var eval = seq.Forward(x);
-                var loss = NN.LossFunction.MSE(eval, y, NN.Reduction.Sum);
-                var lossVal = loss.DataItem<float>();
+                var output = loss(eval, y);
+                var lossVal = output.DataItem<float>();

                 Assert.IsTrue(lossVal < prevLoss);
                 prevLoss = lossVal;

                 optimizer.ZeroGrad();

-                loss.Backward();
+                output.Backward();

                 optimizer.Step();
             }
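Taken together, the hunks apply one mechanical migration, matching the module style of PyTorch, where a criterion such as nn.MSELoss(reduction='sum') is instantiated once and then called on (output, target). Before and after, condensed from the lines above:

    // Before: the loss was computed eagerly, with the reduction
    // passed alongside the tensors.
    var loss = NN.LossFunction.MSE(eval, y, NN.Reduction.Sum);
    loss.Backward();

    // After: the loss function is built once and then applied;
    // the applied result carries the value and the autograd graph.
    var mse = NN.LossFunction.MSE(NN.Reduction.Sum);
    var output = mse(eval, y);
    output.Backward();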