@@ -4443,7 +4443,7 @@ def _calculate_optimal_batch_size(self, num_participants: int, security_level: i
 
         # For small numbers, use a smaller batch size
         if (num_participants < 10):
-            base_batch_size = min(8, num_participants)
+            base_batch_size: int = min(8, num_participants)
             # Even with small participants, adjust for highly uneven share distribution
             if num_shares is not None and num_shares > num_participants * 10:
                 return max(2, int(base_batch_size / 2))  # More conservative reduction for small systems
@@ -4454,7 +4454,7 @@ def _calculate_optimal_batch_size(self, num_participants: int, security_level: i
         if security_level is not None:
             # Convert security level (0-10) to a reduction factor (1.0 to 0.4)
             # Higher security = smaller batches for more granular verification
-            adjustment_factor = max(0.4, 1.0 - (security_level / 15))
+            adjustment_factor: float = max(0.4, 1.0 - (security_level / 15))
 
         # For larger systems, use a batch size that balances efficiency
         # with the ability to quickly identify problematic shares
@@ -4466,18 +4466,21 @@ def _calculate_optimal_batch_size(self, num_participants: int, security_level: i
 
         # Hybrid approach: Consider both logarithmic scaling and CPU count
         # Ensure minimum reasonable batch size with the max(4, ...) operation
-        log_factor: int = max(4, int(math.log2(max(2, num_participants)) * 4 * adjustment_factor))
-        cpu_factor = max(8, int(num_participants // cpu_count * adjustment_factor))
+        num_participants_int: int = num_participants  # Type hint for clarity
+        adjustment_factor_float: float = adjustment_factor  # Type hint for clarity
+        cpu_count_int: int = cpu_count  # Type hint for clarity
+        log_factor: int = max(4, int(math.log2(max(2, num_participants_int)) * 4 * adjustment_factor_float))
+        cpu_factor: int = max(8, int(num_participants_int // cpu_count_int * adjustment_factor_float))
 
         # Use the smaller of the two factors to keep batches manageable
-        batch_size = min(32, min(log_factor, cpu_factor))
+        batch_size: int = min(32, min(log_factor, cpu_factor))
 
         # If num_shares is provided, adjust for highly skewed distributions
         if num_shares is not None and num_shares > num_participants:
-            shares_per_participant = num_shares / max(1, num_participants)
+            shares_per_participant: float = num_shares / max(1, num_participants)
             if shares_per_participant > 10:  # Only adjust for highly uneven distributions
                 # Use logarithmic scaling to avoid extreme reductions
-                reduction_factor = min(3, math.log2(shares_per_participant) / 4)  # Cap the reduction
+                reduction_factor: float = min(3, math.log2(shares_per_participant) / 4)  # Cap the reduction
                 batch_size = max(4, int(batch_size / reduction_factor))
 
         return batch_size
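
For reference, a minimal self-contained sketch of the hybrid batch-size heuristic touched by the hunks above, useful for sanity-checking the numbers. The function name estimate_batch_size and the os.cpu_count() fallback are illustrative assumptions, not part of this commit; the real logic is a class method that already has cpu_count in scope.

import math
import os

def estimate_batch_size(num_participants: int, adjustment_factor: float = 1.0) -> int:
    # Illustrative stand-in for the diffed method above, not the project's API.
    cpu_count = os.cpu_count() or 1  # assumed fallback so the sketch runs standalone
    # Logarithmic factor: grows slowly with participant count, scaled by the security adjustment.
    log_factor = max(4, int(math.log2(max(2, num_participants)) * 4 * adjustment_factor))
    # CPU factor: roughly participants per core, also scaled by the security adjustment.
    cpu_factor = max(8, int(num_participants // cpu_count * adjustment_factor))
    # Use the smaller factor, capped at 32, to keep batches manageable.
    return min(32, min(log_factor, cpu_factor))

# Example: estimate_batch_size(1000, 0.6) on an 8-core machine yields min(32, min(23, 75)) == 23.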
@@ -5185,7 +5188,7 @@ def _detect_byzantine_behavior(
                 extra_entropy = commitments[0][2]  # Get extra_entropy from first coefficient
 
                 actual_commitment: FieldElement = self._compute_hash_commitment(
-                    y, r_combined, int(x), "verify", extra_entropy
+                    y, r_combined, x, "verify", extra_entropy
                 )
 
                 evidence["inconsistent_shares"][recipient_id] = {