benches/throughput.rs (2 changes: 1 addition & 1 deletion)

@@ -448,7 +448,7 @@ async fn setup_rskafka(connection: Vec<String>) -> PartitionClient {
         .await
         .unwrap();

-    client.partition_client(topic_name, 0).unwrap()
+    client.partition_client(topic_name, 0).await.unwrap()
 }

 static LOG_SETUP: Once = Once::new();
src/client/mod.rs (8 changes: 2 additions & 6 deletions)

@@ -116,16 +116,12 @@ impl Client {
     }

     /// Returns a client for performing operations on a specific partition
-    pub fn partition_client(
+    pub async fn partition_client(
         &self,
         topic: impl Into<String> + Send,
         partition: i32,
     ) -> Result<PartitionClient> {
-        Ok(PartitionClient::new(
-            topic.into(),
-            partition,
-            Arc::clone(&self.brokers),
-        ))
+        PartitionClient::new(topic.into(), partition, Arc::clone(&self.brokers)).await
     }

     /// Returns a list of topics in the cluster
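With this change, `Client::partition_client` is async and fallible at construction time, so every caller gains an `.await`. A minimal sketch of the updated call site, assuming a single local broker and an existing topic (the `ClientBuilder` setup and all names here are illustrative, not part of this diff):

```rust
use rskafka::client::ClientBuilder;

async fn example() -> Result<(), Box<dyn std::error::Error>> {
    let client = ClientBuilder::new(vec!["localhost:9093".to_owned()])
        .build()
        .await?;

    // Now async: leader discovery happens here, so an unreachable broker
    // or unknown partition fails fast instead of on first produce/fetch.
    let partition_client = client.partition_client("my_topic", 0).await?;
    println!("partition = {}", partition_client.partition());
    Ok(())
}
```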
src/client/partition.rs (35 changes: 29 additions & 6 deletions)

@@ -22,8 +22,10 @@ use crate::{
     validation::ExactlyOne,
 };
 use async_trait::async_trait;
-use std::ops::{ControlFlow, Deref, Range};
-use std::sync::Arc;
+use std::{
+    ops::{ControlFlow, Deref, Range},
+    sync::Arc,
+};
 use time::OffsetDateTime;
 use tokio::sync::Mutex;
 use tracing::{error, info};
@@ -95,14 +97,35 @@ impl std::fmt::Debug for PartitionClient {
 }

 impl PartitionClient {
-    pub(super) fn new(topic: String, partition: i32, brokers: Arc<BrokerConnector>) -> Self {
-        Self {
+    pub(super) async fn new(
+        topic: String,
+        partition: i32,
+        brokers: Arc<BrokerConnector>,
+    ) -> Result<Self> {
+        let p = Self {
             topic,
             partition,
-            brokers,
+            brokers: Arc::clone(&brokers),
             backoff_config: Default::default(),
             current_broker: Mutex::new(None),
-        }
+        };
+
+        // Force discover and establish a cached connection to the leader
+        let scope = &p;
+        maybe_retry(
+            &Default::default(),
+            &*brokers,
+            "leader_detection",
+            || async move {
+                scope
+                    .get_leader(MetadataLookupMode::CachedArbitrary)
+                    .await?;
+                Ok(())
+            },
+        )
+        .await?;
+
+        Ok(p)
     }

     /// Topic
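The constructor now performs leader discovery eagerly, wrapped in the crate's `maybe_retry` helper so transient metadata failures are retried with backoff before `new` gives up. As a rough illustration of that retry pattern only (a hedged sketch: this is not `maybe_retry`'s real signature, and the helper name and backoff parameters are invented):

```rust
use std::time::Duration;

/// Retry an async operation with capped exponential backoff.
/// Panics if `attempts` is 0.
async fn retry_with_backoff<F, Fut, T, E>(mut op: F, attempts: u32) -> Result<T, E>
where
    F: FnMut() -> Fut,
    Fut: std::future::Future<Output = Result<T, E>>,
{
    let mut delay = Duration::from_millis(100);
    for attempt in 1..=attempts {
        match op().await {
            Ok(v) => return Ok(v),
            // Out of attempts: surface the last error to the caller.
            Err(e) if attempt == attempts => return Err(e),
            Err(_) => {
                // Back off before the next try, doubling up to a cap so
                // repeated metadata lookups don't hammer the brokers.
                tokio::time::sleep(delay).await;
                delay = (delay * 2).min(Duration::from_secs(10));
            }
        }
    }
    unreachable!("attempts must be >= 1")
}
```

Note the `let scope = &p;` binding in the diff: the `async move` closure captures the shared reference `scope` (copying it) rather than moving `p` itself, so each retry can call `get_leader` on the same half-built client and `p` is still available to return once detection succeeds.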
tests/client.rs (24 changes: 15 additions & 9 deletions)

@@ -88,7 +88,10 @@ async fn test_partition_client() {
         .await
         .unwrap();

-    let partition_client = client.partition_client(topic_name.clone(), 0).unwrap();
+    let partition_client = client
+        .partition_client(topic_name.clone(), 0)
+        .await
+        .unwrap();
     assert_eq!(partition_client.topic(), &topic_name);
     assert_eq!(partition_client.partition(), 0);
 }
@@ -162,7 +165,7 @@ async fn test_socks5() {
         .await
         .unwrap();

-    let partition_client = client.partition_client(topic_name, 0).unwrap();
+    let partition_client = client.partition_client(topic_name, 0).await.unwrap();

     let record = record(b"");
     partition_client
@@ -194,7 +197,7 @@ async fn test_produce_empty() {
         .await
         .unwrap();

-    let partition_client = client.partition_client(&topic_name, 1).unwrap();
+    let partition_client = client.partition_client(&topic_name, 1).await.unwrap();
     partition_client
         .produce(vec![], Compression::NoCompression)
         .await
@@ -216,7 +219,7 @@ async fn test_consume_empty() {
         .await
         .unwrap();

-    let partition_client = client.partition_client(&topic_name, 1).unwrap();
+    let partition_client = client.partition_client(&topic_name, 1).await.unwrap();
     let (records, watermark) = partition_client
         .fetch_records(0, 1..10_000, 1_000)
         .await
@@ -240,7 +243,7 @@ async fn test_consume_offset_out_of_range() {
         .await
         .unwrap();

-    let partition_client = client.partition_client(&topic_name, 1).unwrap();
+    let partition_client = client.partition_client(&topic_name, 1).await.unwrap();
     let record = record(b"");
     let offsets = partition_client
         .produce(vec![record], Compression::NoCompression)
@@ -279,7 +282,10 @@ async fn test_get_offset() {
         .await
         .unwrap();

-    let partition_client = client.partition_client(topic_name.clone(), 0).unwrap();
+    let partition_client = client
+        .partition_client(topic_name.clone(), 0)
+        .await
+        .unwrap();

     assert_eq!(
         partition_client
@@ -340,7 +346,7 @@ async fn test_produce_consume_size_cutoff() {
         .await
         .unwrap();

-    let partition_client = Arc::new(client.partition_client(&topic_name, 0).unwrap());
+    let partition_client = Arc::new(client.partition_client(&topic_name, 0).await.unwrap());

     let record_1 = large_record();
     let record_2 = large_record();
@@ -413,7 +419,7 @@ async fn test_consume_midbatch() {
         .await
         .unwrap();

-    let partition_client = client.partition_client(&topic_name, 0).unwrap();
+    let partition_client = client.partition_client(&topic_name, 0).await.unwrap();

     // produce two records into a single batch
     let record_1 = record(b"x");
@@ -458,7 +464,7 @@ async fn test_delete_records() {
         .await
         .unwrap();

-    let partition_client = client.partition_client(&topic_name, 0).unwrap();
+    let partition_client = client.partition_client(&topic_name, 0).await.unwrap();

     // produce the following record batches:
     // - record_1
tests/consumer.rs (16 changes: 8 additions & 8 deletions)

@@ -34,7 +34,7 @@ async fn test_stream_consumer_start_at_0() {

     let record = record(b"x");

-    let partition_client = Arc::new(client.partition_client(&topic, 0).unwrap());
+    let partition_client = Arc::new(client.partition_client(&topic, 0).await.unwrap());
     partition_client
         .produce(vec![record.clone()], Compression::NoCompression)
         .await
@@ -85,7 +85,7 @@ async fn test_stream_consumer_start_at_1() {
     let record_1 = record(b"x");
     let record_2 = record(b"y");

-    let partition_client = Arc::new(client.partition_client(&topic, 0).unwrap());
+    let partition_client = Arc::new(client.partition_client(&topic, 0).await.unwrap());
     partition_client
         .produce(
             vec![record_1.clone(), record_2.clone()],
@@ -121,7 +121,7 @@ async fn test_stream_consumer_offset_out_of_range() {
         .await
         .unwrap();

-    let partition_client = Arc::new(client.partition_client(&topic, 0).unwrap());
+    let partition_client = Arc::new(client.partition_client(&topic, 0).await.unwrap());

     let mut stream = StreamConsumerBuilder::new(partition_client, StartOffset::At(1)).build();

@@ -155,7 +155,7 @@ async fn test_stream_consumer_start_at_earliest() {

     let record_1 = record(b"x");
     let record_2 = record(b"y");

-    let partition_client = Arc::new(client.partition_client(&topic, 0).unwrap());
+    let partition_client = Arc::new(client.partition_client(&topic, 0).await.unwrap());
     partition_client
         .produce(vec![record_1.clone()], Compression::NoCompression)
         .await
@@ -204,7 +204,7 @@ async fn test_stream_consumer_start_at_earliest_empty() {

     let record = record(b"x");

-    let partition_client = Arc::new(client.partition_client(&topic, 0).unwrap());
+    let partition_client = Arc::new(client.partition_client(&topic, 0).await.unwrap());

     let mut stream =
         StreamConsumerBuilder::new(Arc::clone(&partition_client), StartOffset::Earliest)
@@ -245,7 +245,7 @@ async fn test_stream_consumer_start_at_earliest_after_deletion() {
     let record_1 = record(b"x");
     let record_2 = record(b"y");

-    let partition_client = Arc::new(client.partition_client(&topic, 0).unwrap());
+    let partition_client = Arc::new(client.partition_client(&topic, 0).await.unwrap());
     partition_client
         .produce(
             vec![record_1.clone(), record_2.clone()],
@@ -287,7 +287,7 @@ async fn test_stream_consumer_start_at_latest() {
     let record_1 = record(b"x");
     let record_2 = record(b"y");

-    let partition_client = Arc::new(client.partition_client(&topic, 0).unwrap());
+    let partition_client = Arc::new(client.partition_client(&topic, 0).await.unwrap());
     partition_client
         .produce(vec![record_1.clone()], Compression::NoCompression)
         .await
@@ -330,7 +330,7 @@ async fn test_stream_consumer_start_at_latest_empty() {

     let record = record(b"x");

-    let partition_client = Arc::new(client.partition_client(&topic, 0).unwrap());
+    let partition_client = Arc::new(client.partition_client(&topic, 0).await.unwrap());

     let mut stream = StreamConsumerBuilder::new(Arc::clone(&partition_client), StartOffset::Latest)
         .with_max_wait_ms(50)
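The consumer-side pattern from these tests, condensed: construct the (now awaited) partition client, wrap it in an `Arc`, and hand a clone to the stream builder. A brief sketch under the same assumptions as above; the topic name, start offset, and `max_wait_ms` value are illustrative, and the stream item shape (`RecordAndOffset` plus high watermark) follows my reading of the crate's consumer API:

```rust
use std::sync::Arc;

use futures::StreamExt;
use rskafka::client::{
    consumer::{StartOffset, StreamConsumerBuilder},
    Client,
};

async fn consume_first(client: &Client) -> Result<(), Box<dyn std::error::Error>> {
    // The added `.await` is the only API change relative to the old code.
    let partition_client = Arc::new(client.partition_client("my_topic", 0).await?);

    let mut stream =
        StreamConsumerBuilder::new(Arc::clone(&partition_client), StartOffset::Earliest)
            .with_max_wait_ms(50)
            .build();

    // Pull a single record (plus the partition's high watermark) off the stream.
    if let Some(Ok((record_and_offset, high_watermark))) = stream.next().await {
        println!(
            "consumed offset {} (high watermark {})",
            record_and_offset.offset, high_watermark
        );
    }
    Ok(())
}
```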
tests/produce_consume.rs (7 changes: 6 additions & 1 deletion)

@@ -258,7 +258,12 @@ async fn assert_produce_consume<F1, G1, F2, G2>(
         .create_topic(&topic_name, n_partitions, 1, 5_000)
         .await
         .unwrap();
-    let partition_client = Arc::new(client.partition_client(topic_name.clone(), 1).unwrap());
+    let partition_client = Arc::new(
+        client
+            .partition_client(topic_name.clone(), 1)
+            .await
+            .unwrap(),
+    );

     // timestamps for records. We'll reorder the messages though to ts2, ts1, ts3
     let ts1 = now();
tests/producer.rs (2 changes: 1 addition & 1 deletion)

@@ -25,7 +25,7 @@ async fn test_batch_producer() {

     let record = record(b"");

-    let partition_client = Arc::new(client.partition_client(&topic, 0).unwrap());
+    let partition_client = Arc::new(client.partition_client(&topic, 0).await.unwrap());
     let producer = BatchProducerBuilder::new(partition_client)
         .with_linger(Duration::from_secs(5))
         .build(RecordAggregator::new(record.approximate_size() * 2 + 1));