
Commit 74c38f2

Add type hints for clarity in batch size calculations and commitment function
Signed-off-by: DavidOsipov <[email protected]>
1 parent: a88e47d

File tree: 1 file changed, +11 -8 lines

feldman_vss.py

Lines changed: 11 additions & 8 deletions
@@ -4443,7 +4443,7 @@ def _calculate_optimal_batch_size(self, num_participants: int, security_level: i
 
         # For small numbers, use a smaller batch size
         if (num_participants < 10):
-            base_batch_size = min(8, num_participants)
+            base_batch_size: int = min(8, num_participants)
             # Even with small participants, adjust for highly uneven share distribution
             if num_shares is not None and num_shares > num_participants * 10:
                 return max(2, int(base_batch_size / 2))  # More conservative reduction for small systems
@@ -4454,7 +4454,7 @@ def _calculate_optimal_batch_size(self, num_participants: int, security_level: i
         if security_level is not None:
             # Convert security level (0-10) to a reduction factor (1.0 to 0.4)
             # Higher security = smaller batches for more granular verification
-            adjustment_factor = max(0.4, 1.0 - (security_level / 15))
+            adjustment_factor: float = max(0.4, 1.0 - (security_level / 15))
 
         # For larger systems, use a batch size that balances efficiency
         # with the ability to quickly identify problematic shares
@@ -4466,18 +4466,21 @@ def _calculate_optimal_batch_size(self, num_participants: int, security_level: i
 
         # Hybrid approach: Consider both logarithmic scaling and CPU count
         # Ensure minimum reasonable batch size with the max(4, ...) operation
-        log_factor: int = max(4, int(math.log2(max(2, num_participants)) * 4 * adjustment_factor))
-        cpu_factor = max(8, int(num_participants // cpu_count * adjustment_factor))
+        num_participants_int: int = num_participants  # Type hint for clarity
+        adjustment_factor_float: float = adjustment_factor  # Type hint for clarity
+        cpu_count_int: int = cpu_count  # Type hint for clarity
+        log_factor: int = max(4, int(math.log2(max(2, num_participants_int)) * 4 * adjustment_factor_float))
+        cpu_factor: int = max(8, int(num_participants_int // cpu_count_int * adjustment_factor_float))
 
         # Use the smaller of the two factors to keep batches manageable
-        batch_size = min(32, min(log_factor, cpu_factor))
+        batch_size: int = min(32, min(log_factor, cpu_factor))
 
         # If num_shares is provided, adjust for highly skewed distributions
         if num_shares is not None and num_shares > num_participants:
-            shares_per_participant = num_shares / max(1, num_participants)
+            shares_per_participant: float = num_shares / max(1, num_participants)
             if shares_per_participant > 10:  # Only adjust for highly uneven distributions
                 # Use logarithmic scaling to avoid extreme reductions
-                reduction_factor = min(3, math.log2(shares_per_participant) / 4)  # Cap the reduction
+                reduction_factor: float = min(3, math.log2(shares_per_participant) / 4)  # Cap the reduction
                 batch_size = max(4, int(batch_size / reduction_factor))
 
         return batch_size
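Taken together, the three hunks above annotate one batching heuristic. Below is a minimal standalone sketch of that heuristic, with assumptions clearly marked: the lines GitHub elides between hunks are filled in by guess (an early return of base_batch_size for small systems and an adjustment_factor default of 1.0), the pure-alias variables introduced by the commit are folded away, the function name is illustrative rather than the module's API, and cpu_count is sourced from a hypothetical os.cpu_count() fallback.

import math
import os


def calculate_optimal_batch_size(
    num_participants: int,
    security_level: int | None = None,
    num_shares: int | None = None,
) -> int:
    # Assumed source for cpu_count; the diff does not show where it comes from.
    cpu_count: int = os.cpu_count() or 1

    # For small numbers, use a smaller batch size.
    if num_participants < 10:
        base_batch_size: int = min(8, num_participants)
        # Even with small participants, adjust for highly uneven share distribution.
        if num_shares is not None and num_shares > num_participants * 10:
            return max(2, int(base_batch_size / 2))
        return base_batch_size  # Assumed: this return is hidden between hunks.

    # Convert security level (0-10) to a reduction factor (1.0 to 0.4);
    # higher security means smaller batches for more granular verification.
    adjustment_factor: float = 1.0  # Assumed default when security_level is None.
    if security_level is not None:
        adjustment_factor = max(0.4, 1.0 - (security_level / 15))

    # Hybrid approach: logarithmic scaling in participant count, bounded by
    # how many participants each CPU core would have to handle.
    log_factor: int = max(4, int(math.log2(max(2, num_participants)) * 4 * adjustment_factor))
    cpu_factor: int = max(8, int(num_participants // cpu_count * adjustment_factor))

    # Use the smaller of the two factors, capped at 32, to keep batches manageable.
    batch_size: int = min(32, min(log_factor, cpu_factor))

    # If num_shares is provided, shrink batches for highly skewed distributions.
    if num_shares is not None and num_shares > num_participants:
        shares_per_participant: float = num_shares / max(1, num_participants)
        if shares_per_participant > 10:
            # Logarithmic scaling avoids extreme reductions; cap the divisor at 3.
            reduction_factor: float = min(3, math.log2(shares_per_participant) / 4)
            batch_size = max(4, int(batch_size / reduction_factor))

    return batch_size

As a worked example: 100 participants at security_level 5 on an 8-core host give adjustment_factor = max(0.4, 1.0 - 5/15) ≈ 0.667, log_factor = int(log2(100) * 4 * 0.667) = 17, cpu_factor = max(8, int(100 // 8 * 0.667)) = 8, so the batch size is min(32, 17, 8) = 8.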
@@ -5185,7 +5188,7 @@ def _detect_byzantine_behavior(
             extra_entropy = commitments[0][2]  # Get extra_entropy from first coefficient
 
             actual_commitment: FieldElement = self._compute_hash_commitment(
-                y, r_combined, int(x), "verify", extra_entropy
+                y, r_combined, x, "verify", extra_entropy
             )
 
             evidence["inconsistent_shares"][recipient_id] = {
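This last hunk drops a redundant int(x) cast from the call into _compute_hash_commitment, presumably because x is already an integer at this call site, so the cast was a no-op that obscured the type. The implementation of _compute_hash_commitment is not part of this diff; the sketch below is a hypothetical illustration of the general shape of such a hash-based commitment (a digest over domain-separated inputs), not the module's actual code.

import hashlib


def compute_hash_commitment_sketch(
    y: int, r: int, x: int, context: str, extra_entropy: bytes | None = None
) -> int:
    # Bind the share value y, blinding factor r, evaluation point x, and a
    # context label into one digest, so commitments from different contexts
    # (e.g. "verify") cannot collide by construction.
    parts = [str(y).encode(), str(r).encode(), str(x).encode(), context.encode()]
    if extra_entropy is not None:
        parts.append(extra_entropy)  # Optional hardening for low-entropy secrets.
    digest = hashlib.sha3_256(b"|".join(parts)).digest()
    return int.from_bytes(digest, "big")

With the type annotations this commit adds elsewhere, a static checker can confirm that x is already the expected integer type, which is what makes the removed cast safe to delete.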
