@@ -445,4 +445,252 @@ static void secp256k1_bulletproofs_pp_rangeproof_prove_round3_impl(
     }
 }
 
+/* Round 4 of the proof. Computes the norm proof on the w and l values.
+ * Can fail only when the norm proof fails.
+ * This should not happen in our setting because w_vec and l_vec are uniformly
+ * distributed, and thus the norm argument can only fail when the lengths are
+ * not a power of two or when the allocated proof size is not enough.
+ *
+ * We check for both of these conditions beforehand; therefore, in practice,
+ * this function should never fail because of a point at infinity arising in
+ * some interim calculation. However, since the overall API can fail, we also
+ * fail if the norm proof fails for any reason.
+ */
+static int secp256k1_bulletproofs_pp_rangeproof_prove_round4_impl(
+    const secp256k1_context* ctx,
+    secp256k1_scratch_space* scratch,
+    const secp256k1_bulletproofs_generators* gens,
+    const secp256k1_ge* asset_genp,
+    secp256k1_bulletproofs_pp_rangeproof_prover_context* prover_ctx,
+    unsigned char* output,
+    size_t* output_len,
+    secp256k1_sha256* transcript,
+    const secp256k1_scalar* gamma,
+    const size_t num_digits,
+    const size_t digit_base
+) {
+    size_t i;
+    size_t g_offset = digit_base > num_digits ? digit_base : num_digits;
+    /* Compute w = s + t*m + t^2*d + t^3*r + t^4*alpha_m. Store w in s. */
+    /* Has capacity 8 because we can re-use it as the c-poly. */
+    secp256k1_scalar t_pows[8];
+    secp256k1_bulletproofs_pp_rangeproof_powers_of_q(&t_pows[0], &prover_ctx->t, 7); /* Computes from t^1 to t^7 */
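+    /* t_pows[k] holds t^(k+1): below, m is scaled by t (t_pows[0]), d by t^2
+     * (t_pows[1]), r by t^3 (t_pows[2]) and alpha_m by t^4 (t_pows[3]),
+     * matching the w polynomial in the comment above. */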
+
+    for (i = 0; i < g_offset; i++) {
+        if (i < num_digits) {
+            secp256k1_scalar_mul(&prover_ctx->r[i], &prover_ctx->r[i], &t_pows[2]);
+            secp256k1_scalar_add(&prover_ctx->s[i], &prover_ctx->s[i], &prover_ctx->r[i]);
+
+            secp256k1_scalar_mul(&prover_ctx->d[i], &prover_ctx->d[i], &t_pows[1]);
+            secp256k1_scalar_add(&prover_ctx->s[i], &prover_ctx->s[i], &prover_ctx->d[i]);
+        }
+        if (i < digit_base) {
+            secp256k1_scalar_mul(&prover_ctx->m[i], &prover_ctx->m[i], &t_pows[0]);
+            secp256k1_scalar_add(&prover_ctx->s[i], &prover_ctx->s[i], &prover_ctx->m[i]);
+
+            secp256k1_scalar_mul(&prover_ctx->alpha_m[i], &prover_ctx->alpha_m[i], &t_pows[3]);
+            secp256k1_scalar_add(&prover_ctx->s[i], &prover_ctx->s[i], &prover_ctx->alpha_m[i]);
+        }
+    }
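+    /* prover_ctx->s now holds the combined w vector of length g_offset. */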
+    /* Compute l = l_s + t*l_m + t^2*l_d + t^3*l_r. Store l in l_s. */
+    for (i = 0; i < 6; i++) {
+        secp256k1_scalar tmp;
+        secp256k1_scalar_mul(&tmp, &prover_ctx->l_m[i], &t_pows[0]);
+        secp256k1_scalar_add(&prover_ctx->l_s[i], &prover_ctx->l_s[i], &tmp);
+    }
+    /* Manually add l_d2 and l_d4 */
+    {
+        secp256k1_scalar tmp;
+        secp256k1_scalar_mul(&tmp, &prover_ctx->l_m[3], &t_pows[1]);
+        secp256k1_scalar_negate(&tmp, &tmp); /* l_d2 = -l_m3 */
+        secp256k1_scalar_add(&prover_ctx->l_s[2], &prover_ctx->l_s[2], &tmp);
+
+        secp256k1_scalar_mul(&tmp, &prover_ctx->l_m[5], &t_pows[1]);
+        secp256k1_scalar_negate(&tmp, &tmp); /* l_d4 = -l_m5 */
+        secp256k1_scalar_add(&prover_ctx->l_s[4], &prover_ctx->l_s[4], &tmp);
+
+        /* Add 2*gamma*t^5 to l_s[0] */
+        secp256k1_scalar_add(&tmp, &t_pows[4], &t_pows[4]);
+        secp256k1_scalar_mul(&tmp, &tmp, gamma);
+        secp256k1_scalar_add(&prover_ctx->l_s[0], &prover_ctx->l_s[0], &tmp);
+    }
+    /* Set the unused 7th and 8th l_s values to 0 */
+    secp256k1_scalar_set_int(&prover_ctx->l_s[6], 0);
+    secp256k1_scalar_set_int(&prover_ctx->l_s[7], 0);
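+    /* l_s now holds the full 8-element l vector expected by the norm argument below. */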
+
+    /* Make c = y*(t, t^2, t^3, t^4, t^6, t^7, 0, 0) */
+    t_pows[4] = t_pows[5];
+    t_pows[5] = t_pows[6];
+    for (i = 0; i < 6; i++) {
+        secp256k1_scalar_mul(&t_pows[i], &t_pows[i], &prover_ctx->y);
+    }
+    secp256k1_scalar_set_int(&t_pows[6], 0);
+    secp256k1_scalar_set_int(&t_pows[7], 0);
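+    /* t_pows now holds y*(t, t^2, t^3, t^4, t^6, t^7, 0, 0): the t^5 entry is
+     * deliberately skipped, presumably because the committed value enters at
+     * the t^5 coefficient (cf. the 2*gamma*t^5 blinding added to l_s[0] above). */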
+    /* Call the norm argument on w, l */
+    return secp256k1_bulletproofs_pp_rangeproof_norm_product_prove(
+        ctx,
+        scratch,
+        output,
+        output_len,
+        transcript,
+        &prover_ctx->q_sqrt,
+        gens,
+        asset_genp,
+        prover_ctx->s,
+        g_offset,
+        prover_ctx->l_s,
+        8,
+        t_pows,
+        8
+    );
+}
+
+static int secp256k1_bulletproofs_pp_rangeproof_prove_impl(
+    const secp256k1_context* ctx,
+    secp256k1_scratch_space* scratch,
+    const secp256k1_bulletproofs_generators* gens,
+    const secp256k1_ge* asset_genp,
+    unsigned char* proof,
+    size_t* proof_len,
+    const size_t n_bits,
+    const size_t digit_base,
+    const uint64_t value,
+    const uint64_t min_value,
+    const secp256k1_ge* commitp,
+    const secp256k1_scalar* gamma,
+    const unsigned char* nonce,
+    const unsigned char* extra_commit,
+    size_t extra_commit_len
+) {
+    size_t scratch_checkpoint, n_proof_bytes_written, norm_proof_len;
+    secp256k1_sha256 transcript;
+    size_t num_digits = n_bits / secp256k1_bulletproofs_pp_log2(digit_base);
+    size_t h_len = 8;
+    size_t g_offset = num_digits > digit_base ? num_digits : digit_base;
+    size_t log_n = secp256k1_bulletproofs_pp_log2(g_offset), log_m = secp256k1_bulletproofs_pp_log2(h_len);
+    size_t n_rounds = log_n > log_m ? log_n : log_m;
+    int res;
+    secp256k1_bulletproofs_pp_rangeproof_prover_context prover_ctx;
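+
+    /* Expected proof layout: four 33-byte compressed points from rounds 1-3
+     * (two from round 1, one each from rounds 2 and 3), followed by the norm
+     * proof, for which 65 * n_rounds + 64 bytes are reserved. */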
+    /* Check proof sizes */
+    if (*proof_len < 33 * 4 + (65 * n_rounds) + 64) {
+        return 0;
+    }
+    if (gens->n != (g_offset + h_len)) {
+        return 0;
+    }
+    if (!secp256k1_check_power_of_two(digit_base) || !secp256k1_check_power_of_two(num_digits)) {
+        return 0;
+    }
+    if (n_bits > 64) {
+        return 0;
+    }
+    if (value < min_value) {
+        return 0;
+    }
+    if (n_bits < 64 && (value - min_value) >= (1ull << n_bits)) {
+        return 0;
+    }
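+    /* The range check above is skipped when n_bits == 64: every 64-bit
+     * difference is then in range, and (1ull << 64) would be undefined
+     * behaviour anyway. */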
+    if (extra_commit_len > 0 && extra_commit == NULL) {
+        return 0;
+    }
+
+    /* Compute the base-digit representation of the value */
+    /* Alloc for prover_ctx */
+    scratch_checkpoint = secp256k1_scratch_checkpoint(&ctx->error_callback, scratch);
+    prover_ctx.s = (secp256k1_scalar*)secp256k1_scratch_alloc(&ctx->error_callback, scratch, g_offset * sizeof(secp256k1_scalar));
+    prover_ctx.d = (secp256k1_scalar*)secp256k1_scratch_alloc(&ctx->error_callback, scratch, num_digits * sizeof(secp256k1_scalar));
+    prover_ctx.m = (secp256k1_scalar*)secp256k1_scratch_alloc(&ctx->error_callback, scratch, digit_base * sizeof(secp256k1_scalar));
+    prover_ctx.r = (secp256k1_scalar*)secp256k1_scratch_alloc(&ctx->error_callback, scratch, num_digits * sizeof(secp256k1_scalar));
+    prover_ctx.alpha_m = (secp256k1_scalar*)secp256k1_scratch_alloc(&ctx->error_callback, scratch, digit_base * sizeof(secp256k1_scalar));
+
+    prover_ctx.l_m = (secp256k1_scalar*)secp256k1_scratch_alloc(&ctx->error_callback, scratch, 6 * sizeof(secp256k1_scalar));
+    prover_ctx.l_s = (secp256k1_scalar*)secp256k1_scratch_alloc(&ctx->error_callback, scratch, h_len * sizeof(secp256k1_scalar));
+
+    prover_ctx.q_pows = (secp256k1_scalar*)secp256k1_scratch_alloc(&ctx->error_callback, scratch, g_offset * sizeof(secp256k1_scalar));
+    prover_ctx.q_inv_pows = (secp256k1_scalar*)secp256k1_scratch_alloc(&ctx->error_callback, scratch, g_offset * sizeof(secp256k1_scalar));
+
+    if (prover_ctx.s == NULL || prover_ctx.d == NULL || prover_ctx.m == NULL || prover_ctx.r == NULL
+        || prover_ctx.alpha_m == NULL || prover_ctx.l_m == NULL || prover_ctx.l_s == NULL
+        || prover_ctx.q_pows == NULL || prover_ctx.q_inv_pows == NULL) {
+        secp256k1_scratch_apply_checkpoint(&ctx->error_callback, scratch, scratch_checkpoint);
+        return 0;
+    }
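+    /* All vectors live on the scratch space; the checkpoint taken above lets
+     * us free them in one step on every exit path below. */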
+
+    /* Initialize the transcript by committing to all the public data */
+    secp256k1_bulletproofs_pp_commit_initial_data(
+        &transcript,
+        num_digits,
+        digit_base,
+        min_value,
+        commitp,
+        asset_genp,
+        extra_commit,
+        extra_commit_len
+    );
+
+    n_proof_bytes_written = 0;
+    secp256k1_bulletproofs_pp_rangeproof_prove_round1_impl(
+        &prover_ctx,
+        gens,
+        asset_genp,
+        &proof[n_proof_bytes_written],
+        &transcript,
+        num_digits,
+        digit_base,
+        value - min_value,
+        nonce
+    );
+    n_proof_bytes_written += 33 * 2;
+
+    secp256k1_bulletproofs_pp_rangeproof_prove_round2_impl(
+        &prover_ctx,
+        gens,
+        asset_genp,
+        &proof[n_proof_bytes_written],
+        &transcript,
+        num_digits,
+        digit_base,
+        nonce
+    );
+    n_proof_bytes_written += 33;
+
+    secp256k1_bulletproofs_pp_rangeproof_prove_round3_impl(
+        &prover_ctx,
+        gens,
+        asset_genp,
+        &proof[n_proof_bytes_written],
+        &transcript,
+        num_digits,
+        digit_base,
+        gamma,
+        nonce
+    );
+    n_proof_bytes_written += 33;
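+    /* Rounds 1-3 have written 33 * 4 = 132 bytes of commitments, matching the
+     * size check at the top of this function. */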
+
+    /* Calculate the remaining buffer size. We have already checked that the
+     * buffer is large enough. */
+    norm_proof_len = *proof_len - n_proof_bytes_written;
+    res = secp256k1_bulletproofs_pp_rangeproof_prove_round4_impl(
+        ctx,
+        scratch,
+        gens,
+        asset_genp,
+        &prover_ctx,
+        &proof[n_proof_bytes_written],
+        &norm_proof_len,
+        &transcript,
+        gamma,
+        num_digits,
+        digit_base
+    );
+    /* No need to worry about constant-timeness from this point on; all data is public. */
+    if (res) {
+        *proof_len = n_proof_bytes_written + norm_proof_len;
+    }
+    secp256k1_scratch_apply_checkpoint(&ctx->error_callback, scratch, scratch_checkpoint);
+    return res;
+}
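+
+/* Illustrative only (not part of the module API): a hypothetical caller of the
+ * internal prover, assuming a 64-bit range with digit base 16 (so num_digits
+ * = 16) and a `gens` object of size max(num_digits, digit_base) + 8 = 24:
+ *
+ *     unsigned char proof[1000];
+ *     size_t proof_len = sizeof(proof);
+ *     int ok = secp256k1_bulletproofs_pp_rangeproof_prove_impl(
+ *         ctx, scratch, gens, asset_genp, proof, &proof_len,
+ *         64, 16, value, 0, &commit, &gamma, nonce, NULL, 0);
+ *
+ * On success, proof_len is reduced to the number of bytes actually written.
+ */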
+
+
 #endif