Skip to content

Commit a1bdd6a

Browse files
authored
Merge branch 'helius-labs:main' into triton
2 parents a2f362c + 9641911 commit a1bdd6a

File tree

110 files changed

+9754
-687
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

110 files changed

+9754
-687
lines changed

Cargo.lock

Lines changed: 454 additions & 359 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

Cargo.toml

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -86,7 +86,6 @@ light-batched-merkle-tree = "0.3.0"
8686
light-merkle-tree-metadata = "0.3.0"
8787
light-compressed-account = { version = "0.3.0", features = ["anchor"] }
8888
light-hasher = { version = "3.1.0" }
89-
9089
light-poseidon = "0.3.0"
9190

9291
sqlx = { version = "0.6.2", features = [

src/api/method/get_batch_address_update_info.rs

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -57,6 +57,14 @@ pub async fn get_batch_address_update_info(
5757
conn: &DatabaseConnection,
5858
request: GetBatchAddressUpdateInfoRequest,
5959
) -> Result<GetBatchAddressUpdateInfoResponse, PhotonApiError> {
60+
61+
if request.limit as usize > MAX_ADDRESSES {
62+
return Err(PhotonApiError::ValidationError(format!(
63+
"Too many addresses requested {}. Maximum allowed: {}",
64+
request.limit, MAX_ADDRESSES
65+
)));
66+
}
67+
6068
let limit = request.limit;
6169
let merkle_tree_pubkey = request.tree;
6270
let tree_info = TreeInfo::get(&merkle_tree_pubkey.to_string())

src/api/method/get_compressed_token_balances_by_owner.rs

Lines changed: 9 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -59,7 +59,7 @@ pub async fn get_compressed_token_balances_by_owner(
5959
if let Some(cursor) = cursor {
6060
let bytes = cursor.0;
6161
let expected_cursor_length = 32;
62-
let mint = if bytes.len() == expected_cursor_length {
62+
let cursor_mint = if bytes.len() == expected_cursor_length {
6363
bytes.to_vec()
6464
} else {
6565
return Err(PhotonApiError::ValidationError(format!(
@@ -68,7 +68,14 @@ pub async fn get_compressed_token_balances_by_owner(
6868
bytes.len()
6969
)));
7070
};
71-
filter = filter.and(token_owner_balances::Column::Mint.gt::<Vec<u8>>(mint.into()));
71+
// Only use mint > cursor_mint if we're not filtering by a specific mint
72+
// If filtering by a specific mint, the cursor should be ignored or we'd get no results
73+
if mint.is_none() {
74+
filter =
75+
filter.and(token_owner_balances::Column::Mint.gt::<Vec<u8>>(cursor_mint.into()));
76+
}
77+
// If a specific mint is provided, we can't paginate within that mint
78+
// because there's only one record per owner-mint combination in token_owner_balances
7279
}
7380
let limit = limit.map(|l| l.value()).unwrap_or(PAGE_LIMIT);
7481

src/api/method/get_multiple_compressed_accounts.rs

Lines changed: 9 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,11 @@
1-
use std::collections::HashMap;
2-
31
use super::{super::error::PhotonApiError, utils::PAGE_LIMIT};
42
use crate::common::typedefs::account::{Account, AccountV2};
53
use crate::common::typedefs::context::Context;
64
use crate::common::typedefs::hash::Hash;
75
use crate::common::typedefs::serializable_pubkey::SerializablePubkey;
86
use crate::dao::generated::accounts;
9-
use sea_orm::{ColumnTrait, DatabaseConnection, EntityTrait, QueryFilter};
7+
use crate::dao::helpers::{find_accounts_by_addresses, find_accounts_by_hashes};
8+
use sea_orm::DatabaseConnection;
109
use serde::{Deserialize, Serialize};
1110
use utoipa::{
1211
openapi::{RefOr, Schema},
@@ -74,50 +73,27 @@ pub async fn fetch_accounts_from_hashes(
7473
hashes: Vec<Hash>,
7574
spent: bool,
7675
) -> Result<Vec<Option<accounts::Model>>, PhotonApiError> {
77-
let raw_hashes: Vec<Vec<u8>> = hashes.into_iter().map(|hash| hash.to_vec()).collect();
78-
79-
let accounts = accounts::Entity::find()
80-
.filter(
81-
accounts::Column::Hash
82-
.is_in(raw_hashes.clone())
83-
.and(accounts::Column::Spent.eq(spent)),
84-
)
85-
.all(conn)
76+
let hash_to_account = find_accounts_by_hashes(conn, &hashes, Some(spent))
8677
.await
8778
.map_err(|e| PhotonApiError::UnexpectedError(format!("DB error: {}", e)))?;
8879

89-
let hash_to_account: HashMap<Vec<u8>, accounts::Model> = accounts
90-
.into_iter()
91-
.map(|account| (account.hash.clone(), account))
92-
.collect();
93-
94-
Ok(raw_hashes
80+
Ok(hashes
9581
.into_iter()
96-
.map(|hash| hash_to_account.get(&hash).cloned())
82+
.map(|hash| hash_to_account.get(&hash.to_vec()).cloned())
9783
.collect())
9884
}
9985

10086
async fn fetch_account_from_addresses(
10187
conn: &DatabaseConnection,
10288
addresses: Vec<SerializablePubkey>,
10389
) -> Result<Vec<Option<accounts::Model>>, PhotonApiError> {
104-
let raw_addresses: Vec<Vec<u8>> = addresses.into_iter().map(|addr| addr.into()).collect();
105-
let accounts = accounts::Entity::find()
106-
.filter(
107-
accounts::Column::Address
108-
.is_in(raw_addresses.clone())
109-
.and(accounts::Column::Spent.eq(false)),
110-
)
111-
.all(conn)
90+
let address_to_account = find_accounts_by_addresses(conn, &addresses, Some(false))
11291
.await
11392
.map_err(|e| PhotonApiError::UnexpectedError(format!("DB error: {}", e)))?;
114-
let address_to_account: HashMap<Option<Vec<u8>>, accounts::Model> = accounts
115-
.into_iter()
116-
.map(|account| (account.address.clone(), account))
117-
.collect();
118-
Ok(raw_addresses
93+
94+
Ok(addresses
11995
.into_iter()
120-
.map(|addr| address_to_account.get(&Some(addr)).cloned())
96+
.map(|addr| address_to_account.get(&addr.to_bytes_vec()).cloned())
12197
.collect())
12298
}
12399

src/api/method/get_queue_elements.rs

Lines changed: 13 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -57,6 +57,14 @@ pub async fn get_queue_elements(
5757
request: GetQueueElementsRequest,
5858
) -> Result<GetQueueElementsResponse, PhotonApiError> {
5959
let queue_type = QueueType::from(request.queue_type as u64);
60+
61+
if request.limit > 1000 {
62+
return Err(PhotonApiError::ValidationError(format!(
63+
"Too many queue elements requested {}. Maximum allowed: 1000",
64+
request.limit
65+
)));
66+
}
67+
6068
let limit = request.limit;
6169
let context = Context::extract(conn).await?;
6270
let tx = conn.begin().await?;
@@ -73,11 +81,13 @@ pub async fn get_queue_elements(
7381

7482
match queue_type {
7583
QueueType::InputStateV2 => {
76-
query_condition =
77-
query_condition.add(accounts::Column::NullifierQueueIndex.is_not_null());
84+
query_condition = query_condition
85+
.add(accounts::Column::NullifierQueueIndex.is_not_null())
86+
.add(accounts::Column::NullifiedInTree.eq(false));
7887
if let Some(start_queue_index) = request.start_queue_index {
7988
query_condition = query_condition
80-
.add(accounts::Column::NullifierQueueIndex.gte(start_queue_index as i64));
89+
.add(accounts::Column::NullifierQueueIndex.gte(start_queue_index as i64))
90+
.add(accounts::Column::NullifiedInTree.eq(false));
8191
}
8292
}
8393
QueueType::OutputStateV2 => {

src/api/method/get_validity_proof/prover/prove.rs

Lines changed: 17 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -15,11 +15,14 @@ use crate::ingester::persist::{MerkleProofWithContext, TREE_HEIGHT_V1};
1515
use light_batched_merkle_tree::constants::{
1616
DEFAULT_BATCH_ADDRESS_TREE_HEIGHT, DEFAULT_BATCH_STATE_TREE_HEIGHT,
1717
};
18-
use light_batched_merkle_tree::merkle_tree_metadata::BatchedMerkleTreeMetadata;
1918
use reqwest::Client;
2019

2120
const STATE_TREE_QUEUE_SIZE: u64 = 2400;
2221

22+
// TODO: we should use BatchedMerkleTreeMetadata::default().root_history_capacity instead of hardcoding.
23+
// It's fixed in light-batched-merkle-tree = "0.4.2", but we need to publish all the dependencies first.
24+
const BATCHED_MERKLE_TREE_ROOT_HISTORY_CAPACITY: u64 = 200;
25+
2326
pub(crate) async fn generate_proof(
2427
db_account_proofs: Vec<MerkleProofWithContext>,
2528
db_new_address_proofs: Vec<MerkleContextWithNewAddressProof>,
@@ -86,16 +89,24 @@ pub(crate) async fn generate_proof(
8689
} else {
8790
address_tree_height
8891
};
92+
8993
let queue_size = if queue_determining_height == TREE_HEIGHT_V1 as usize {
9094
STATE_TREE_QUEUE_SIZE
9195
} else if queue_determining_height == 0 {
9296
// No proofs, default for batched (should ideally not hit if circuit_type is determined)
93-
BatchedMerkleTreeMetadata::default().root_history_capacity as u64
97+
BATCHED_MERKLE_TREE_ROOT_HISTORY_CAPACITY
9498
} else {
9599
// Batched trees
96-
BatchedMerkleTreeMetadata::default().root_history_capacity as u64
100+
BATCHED_MERKLE_TREE_ROOT_HISTORY_CAPACITY
97101
};
98102

103+
log::debug!(
104+
"Queue size: state_tree_height={}, address_tree_height={}, queue_size={}",
105+
state_tree_height,
106+
address_tree_height,
107+
queue_size
108+
);
109+
99110
let batch_inputs = HexBatchInputsForProver {
100111
circuit_type: circuit_type.to_string(),
101112
state_tree_height: state_tree_height as u32,
@@ -143,6 +154,9 @@ pub(crate) async fn generate_proof(
143154
let compressed_proof = compress_proof(&proof)?;
144155
let mut account_details = Vec::with_capacity(db_account_proofs.len());
145156
for acc_proof in db_account_proofs.iter() {
157+
log::debug!("Proof generation: tree {} leaf_index {} root_seq {} queue_size {} root_index_mod_queue {}",
158+
acc_proof.merkle_tree, acc_proof.leaf_index, acc_proof.root_seq, queue_size, acc_proof.root_seq % queue_size);
159+
146160
let tree_info = TreeInfo::get(&acc_proof.merkle_tree.to_string().as_str())
147161
.ok_or(PhotonApiError::UnexpectedError(format!(
148162
"Failed to parse TreeInfo for account tree '{}'",

src/api/method/utils.rs

Lines changed: 26 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -149,15 +149,21 @@ pub async fn fetch_token_accounts(
149149
bytes.len()
150150
)));
151151
}
152-
let (mint, hash) = bytes.split_at(32);
152+
let (cursor_mint, cursor_hash) = bytes.split_at(32);
153153

154-
filter = filter.and(
155-
token_accounts::Column::Mint.gt::<Vec<u8>>(mint.into()).or(
154+
// If a specific mint is provided, only use hash for pagination within that mint
155+
if options.mint.is_some() {
156+
filter = filter.and(token_accounts::Column::Hash.gt::<Vec<u8>>(cursor_hash.into()));
157+
} else {
158+
// No specific mint, use both mint and hash for pagination
159+
filter = filter.and(
156160
token_accounts::Column::Mint
157-
.eq::<Vec<u8>>(mint.into())
158-
.and(token_accounts::Column::Hash.gt::<Vec<u8>>(hash.into())),
159-
),
160-
);
161+
.gt::<Vec<u8>>(cursor_mint.into())
162+
.or(token_accounts::Column::Mint
163+
.eq::<Vec<u8>>(cursor_mint.into())
164+
.and(token_accounts::Column::Hash.gt::<Vec<u8>>(cursor_hash.into()))),
165+
);
166+
}
161167
}
162168
if let Some(l) = options.limit {
163169
limit = l.value();
@@ -169,8 +175,6 @@ pub async fn fetch_token_accounts(
169175
.order_by(token_accounts::Column::Mint, sea_orm::Order::Asc)
170176
.order_by(token_accounts::Column::Hash, sea_orm::Order::Asc)
171177
.limit(limit)
172-
.order_by(token_accounts::Column::Mint, sea_orm::Order::Asc)
173-
.order_by(token_accounts::Column::Hash, sea_orm::Order::Asc)
174178
.all(conn)
175179
.await?
176180
.drain(..)
@@ -685,15 +689,21 @@ pub async fn fetch_token_accounts_v2(
685689
bytes.len()
686690
)));
687691
}
688-
let (mint, hash) = bytes.split_at(32);
692+
let (cursor_mint, cursor_hash) = bytes.split_at(32);
689693

690-
filter = filter.and(
691-
token_accounts::Column::Mint.gt::<Vec<u8>>(mint.into()).or(
694+
// If a specific mint is provided, only use hash for pagination within that mint
695+
if options.mint.is_some() {
696+
filter = filter.and(token_accounts::Column::Hash.gt::<Vec<u8>>(cursor_hash.into()));
697+
} else {
698+
// No specific mint, use both mint and hash for pagination
699+
filter = filter.and(
692700
token_accounts::Column::Mint
693-
.eq::<Vec<u8>>(mint.into())
694-
.and(token_accounts::Column::Hash.gt::<Vec<u8>>(hash.into())),
695-
),
696-
);
701+
.gt::<Vec<u8>>(cursor_mint.into())
702+
.or(token_accounts::Column::Mint
703+
.eq::<Vec<u8>>(cursor_mint.into())
704+
.and(token_accounts::Column::Hash.gt::<Vec<u8>>(cursor_hash.into()))),
705+
);
706+
}
697707
}
698708
if let Some(l) = options.limit {
699709
limit = l.value();

src/dao/helpers.rs

Lines changed: 97 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,97 @@
1+
use crate::common::typedefs::hash::Hash;
2+
use crate::common::typedefs::serializable_pubkey::SerializablePubkey;
3+
use crate::dao::generated::{accounts, state_trees};
4+
use sea_orm::{ColumnTrait, DatabaseConnection, EntityTrait, QueryFilter};
5+
use std::collections::HashMap;
6+
7+
/// Finds accounts by multiple hashes, optionally filtering by spent status
8+
pub async fn find_accounts_by_hashes(
9+
conn: &DatabaseConnection,
10+
hashes: &[Hash],
11+
spent_filter: Option<bool>,
12+
) -> Result<HashMap<Vec<u8>, accounts::Model>, sea_orm::DbErr> {
13+
let raw_hashes: Vec<Vec<u8>> = hashes.iter().map(|h| h.to_vec()).collect();
14+
15+
let mut query = accounts::Entity::find().filter(accounts::Column::Hash.is_in(raw_hashes));
16+
17+
if let Some(spent) = spent_filter {
18+
query = query.filter(accounts::Column::Spent.eq(spent));
19+
}
20+
21+
let accounts = query.all(conn).await?;
22+
23+
Ok(accounts
24+
.into_iter()
25+
.map(|account| (account.hash.clone(), account))
26+
.collect())
27+
}
28+
29+
/// Finds accounts by multiple addresses, optionally filtering by spent status
30+
pub async fn find_accounts_by_addresses(
31+
conn: &DatabaseConnection,
32+
addresses: &[SerializablePubkey],
33+
spent_filter: Option<bool>,
34+
) -> Result<HashMap<Vec<u8>, accounts::Model>, sea_orm::DbErr> {
35+
let raw_addresses: Vec<Vec<u8>> = addresses.iter().map(|addr| addr.to_bytes_vec()).collect();
36+
37+
let mut query = accounts::Entity::find().filter(accounts::Column::Address.is_in(raw_addresses));
38+
39+
if let Some(spent) = spent_filter {
40+
query = query.filter(accounts::Column::Spent.eq(spent));
41+
}
42+
43+
let accounts = query.all(conn).await?;
44+
45+
Ok(accounts
46+
.into_iter()
47+
.map(|account| (account.address.clone().unwrap_or_default(), account))
48+
.collect())
49+
}
50+
51+
/// Finds leaf nodes in state_trees by multiple hashes
52+
pub async fn find_leaf_nodes_by_hashes(
53+
conn: &DatabaseConnection,
54+
hashes: &[Hash],
55+
) -> Result<HashMap<Vec<u8>, state_trees::Model>, sea_orm::DbErr> {
56+
let raw_hashes: Vec<Vec<u8>> = hashes.iter().map(|h| h.to_vec()).collect();
57+
58+
let leaf_nodes = state_trees::Entity::find()
59+
.filter(
60+
state_trees::Column::Hash
61+
.is_in(raw_hashes)
62+
.and(state_trees::Column::Level.eq(0)),
63+
)
64+
.all(conn)
65+
.await?;
66+
67+
Ok(leaf_nodes
68+
.into_iter()
69+
.map(|node| (node.hash.clone(), node))
70+
.collect())
71+
}
72+
73+
/// Finds a single account by hash
74+
pub async fn find_account_by_hash(
75+
conn: &DatabaseConnection,
76+
hash: &Hash,
77+
) -> Result<Option<accounts::Model>, sea_orm::DbErr> {
78+
accounts::Entity::find()
79+
.filter(accounts::Column::Hash.eq(hash.to_vec()))
80+
.one(conn)
81+
.await
82+
}
83+
84+
/// Finds a single leaf node by hash
85+
pub async fn find_leaf_node_by_hash(
86+
conn: &DatabaseConnection,
87+
hash: &Hash,
88+
) -> Result<Option<state_trees::Model>, sea_orm::DbErr> {
89+
state_trees::Entity::find()
90+
.filter(
91+
state_trees::Column::Hash
92+
.eq(hash.to_vec())
93+
.and(state_trees::Column::Level.eq(0)),
94+
)
95+
.one(conn)
96+
.await
97+
}

src/dao/mod.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1 +1,2 @@
11
pub mod generated;
2+
pub mod helpers;

0 commit comments

Comments
 (0)