Skip to content

Commit 26af770

Browse files
Merge pull request #172 from influxdata/crepererum/improve_testing_cfg
test: improve testing config
2 parents 2f855b2 + ad126c5 commit 26af770

File tree

7 files changed

+273
-152
lines changed

7 files changed

+273
-152
lines changed

.circleci/config.yml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -207,6 +207,7 @@ jobs:
207207
RUST_LOG: "trace"
208208
# Run integration tests
209209
TEST_INTEGRATION: 1
210+
TEST_BROKER_IMPL: redpanda
210211
TEST_JAVA_INTEROPT: 1
211212
# Don't use the first node here since this is likely the controller and we want to ensure that we automatically
212213
# pick the controller for certain actions (e.g. topic creation) and don't just get lucky.
@@ -284,8 +285,7 @@ jobs:
284285
RUST_LOG: "trace"
285286
# Run integration tests
286287
TEST_INTEGRATION: 1
287-
# Kafka support DeleteRecords
288-
TEST_DELETE_RECORDS: 1
288+
TEST_BROKER_IMPL: kafka
289289
TEST_JAVA_INTEROPT: 1
290290
# Don't use the first node here since this is likely the controller and we want to ensure that we automatically
291291
# pick the controller for certain actions (e.g. topic creation) and don't just get lucky.

README.md

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -115,7 +115,7 @@ $ docker-compose -f docker-compose-redpanda.yml up
115115
in one session, and then run:
116116

117117
```console
118-
$ TEST_INTEGRATION=1 KAFKA_CONNECT=0.0.0.0:9011 cargo test
118+
$ TEST_INTEGRATION=1 TEST_BROKER_IMPL=redpanda KAFKA_CONNECT=0.0.0.0:9011 cargo test
119119
```
120120

121121
in another session.
@@ -131,7 +131,7 @@ $ docker-compose -f docker-compose-kafka.yml up
131131
in one session, and then run:
132132

133133
```console
134-
$ TEST_INTEGRATION=1 TEST_DELETE_RECORDS=1 KAFKA_CONNECT=localhost:9011 cargo test
134+
$ TEST_INTEGRATION=1 TEST_BROKER_IMPL=kafka KAFKA_CONNECT=localhost:9011 cargo test
135135
```
136136

137137
in another session. Note that Apache Kafka supports a different set of features than redpanda, so we pass other
@@ -231,14 +231,14 @@ execution that hooks right into the place where it is about to exit:
231231
Install [cargo-criterion], make sure you have some Kafka cluster running, and then you can run all benchmarks with:
232232

233233
```console
234-
$ TEST_INTEGRATION=1 KAFKA_CONNECT=localhost:9011 cargo criterion --all-features
234+
$ TEST_INTEGRATION=1 TEST_BROKER_IMPL=kafka KAFKA_CONNECT=localhost:9011 cargo criterion --all-features
235235
```
236236

237237
If you find a benchmark that is too slow, you may want to profile it. Get [cargo-with], and [perf], then run (here
238238
for the `parallel/rskafka` benchmark):
239239

240240
```console
241-
$ TEST_INTEGRATION=1 KAFKA_CONNECT=localhost:9011 cargo with 'perf record --call-graph dwarf -- {bin}' -- \
241+
$ TEST_INTEGRATION=1 TEST_BROKER_IMPL=kafka KAFKA_CONNECT=localhost:9011 cargo with 'perf record --call-graph dwarf -- {bin}' -- \
242242
bench --all-features --bench write_throughput -- \
243243
--bench --noplot parallel/rskafka
244244
```

tests/client.rs

Lines changed: 64 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -10,22 +10,28 @@ use rskafka::{
1010
use std::{collections::BTreeMap, str::FromStr, sync::Arc, time::Duration};
1111

1212
mod test_helpers;
13-
use test_helpers::{maybe_start_logging, now, random_topic_name, record};
13+
use test_helpers::{maybe_start_logging, now, random_topic_name, record, TEST_TIMEOUT};
1414

1515
#[tokio::test]
1616
async fn test_plain() {
1717
maybe_start_logging();
1818

19-
let connection = maybe_skip_kafka_integration!();
20-
ClientBuilder::new(connection).build().await.unwrap();
19+
let test_cfg = maybe_skip_kafka_integration!();
20+
ClientBuilder::new(test_cfg.bootstrap_brokers)
21+
.build()
22+
.await
23+
.unwrap();
2124
}
2225

2326
#[tokio::test]
2427
async fn test_topic_crud() {
2528
maybe_start_logging();
2629

27-
let connection = maybe_skip_kafka_integration!();
28-
let client = ClientBuilder::new(connection).build().await.unwrap();
30+
let test_cfg = maybe_skip_kafka_integration!();
31+
let client = ClientBuilder::new(test_cfg.bootstrap_brokers)
32+
.build()
33+
.await
34+
.unwrap();
2935
let controller_client = client.controller_client().unwrap();
3036
let topics = client.list_topics().await.unwrap();
3137

@@ -46,7 +52,7 @@ async fn test_topic_crud() {
4652
.unwrap();
4753

4854
// might take a while to converge
49-
tokio::time::timeout(Duration::from_millis(1_000), async {
55+
tokio::time::timeout(TEST_TIMEOUT, async {
5056
loop {
5157
let topics = client.list_topics().await.unwrap();
5258
let topic = topics.iter().find(|t| t.name == new_topic);
@@ -77,10 +83,13 @@ async fn test_topic_crud() {
7783
async fn test_partition_client() {
7884
maybe_start_logging();
7985

80-
let connection = maybe_skip_kafka_integration!();
86+
let test_cfg = maybe_skip_kafka_integration!();
8187
let topic_name = random_topic_name();
8288

83-
let client = ClientBuilder::new(connection).build().await.unwrap();
89+
let client = ClientBuilder::new(test_cfg.bootstrap_brokers)
90+
.build()
91+
.await
92+
.unwrap();
8493

8594
let controller_client = client.controller_client().unwrap();
8695
controller_client
@@ -100,13 +109,17 @@ async fn test_partition_client() {
100109
async fn test_non_existing_partition() {
101110
maybe_start_logging();
102111

103-
let connection = maybe_skip_kafka_integration!();
112+
let test_cfg = maybe_skip_kafka_integration!();
104113
let topic_name = random_topic_name();
105114

106-
let client = ClientBuilder::new(connection).build().await.unwrap();
115+
let client = ClientBuilder::new(test_cfg.bootstrap_brokers)
116+
.build()
117+
.await
118+
.unwrap();
107119

108120
// do NOT create the topic
109121

122+
// short timeout, should just check that we will never finish
110123
tokio::time::timeout(Duration::from_millis(100), async {
111124
client
112125
.partition_client(topic_name.clone(), 0, UnknownTopicHandling::Retry)
@@ -167,8 +180,8 @@ async fn test_tls() {
167180
.with_single_cert(vec![producer_root], private_key)
168181
.unwrap();
169182

170-
let connection = maybe_skip_kafka_integration!();
171-
ClientBuilder::new(connection)
183+
let test_cfg = maybe_skip_kafka_integration!();
184+
ClientBuilder::new(test_cfg.bootstrap_brokers)
172185
.tls_config(Arc::new(config))
173186
.build()
174187
.await
@@ -180,14 +193,11 @@ async fn test_tls() {
180193
async fn test_socks5() {
181194
maybe_start_logging();
182195

183-
// e.g. "my-connection-kafka-bootstrap:9092"
184-
let connection = maybe_skip_kafka_integration!();
185-
// e.g. "localhost:1080"
186-
let proxy = maybe_skip_SOCKS_PROXY!();
196+
let test_cfg = maybe_skip_kafka_integration!(socks5);
187197
let topic_name = random_topic_name();
188198

189-
let client = ClientBuilder::new(connection)
190-
.socks5_proxy(proxy)
199+
let client = ClientBuilder::new(test_cfg.bootstrap_brokers)
200+
.socks5_proxy(test_cfg.socks5_proxy.unwrap())
191201
.build()
192202
.await
193203
.unwrap();
@@ -222,11 +232,14 @@ async fn test_socks5() {
222232
async fn test_produce_empty() {
223233
maybe_start_logging();
224234

225-
let connection = maybe_skip_kafka_integration!();
235+
let test_cfg = maybe_skip_kafka_integration!();
226236
let topic_name = random_topic_name();
227237
let n_partitions = 2;
228238

229-
let client = ClientBuilder::new(connection).build().await.unwrap();
239+
let client = ClientBuilder::new(test_cfg.bootstrap_brokers)
240+
.build()
241+
.await
242+
.unwrap();
230243
let controller_client = client.controller_client().unwrap();
231244
controller_client
232245
.create_topic(&topic_name, n_partitions, 1, 5_000)
@@ -247,11 +260,14 @@ async fn test_produce_empty() {
247260
async fn test_consume_empty() {
248261
maybe_start_logging();
249262

250-
let connection = maybe_skip_kafka_integration!();
263+
let test_cfg = maybe_skip_kafka_integration!();
251264
let topic_name = random_topic_name();
252265
let n_partitions = 2;
253266

254-
let client = ClientBuilder::new(connection).build().await.unwrap();
267+
let client = ClientBuilder::new(test_cfg.bootstrap_brokers)
268+
.build()
269+
.await
270+
.unwrap();
255271
let controller_client = client.controller_client().unwrap();
256272
controller_client
257273
.create_topic(&topic_name, n_partitions, 1, 5_000)
@@ -274,11 +290,14 @@ async fn test_consume_empty() {
274290
async fn test_consume_offset_out_of_range() {
275291
maybe_start_logging();
276292

277-
let connection = maybe_skip_kafka_integration!();
293+
let test_cfg = maybe_skip_kafka_integration!();
278294
let topic_name = random_topic_name();
279295
let n_partitions = 2;
280296

281-
let client = ClientBuilder::new(connection).build().await.unwrap();
297+
let client = ClientBuilder::new(test_cfg.bootstrap_brokers)
298+
.build()
299+
.await
300+
.unwrap();
282301
let controller_client = client.controller_client().unwrap();
283302
controller_client
284303
.create_topic(&topic_name, n_partitions, 1, 5_000)
@@ -314,11 +333,11 @@ async fn test_consume_offset_out_of_range() {
314333
async fn test_get_offset() {
315334
maybe_start_logging();
316335

317-
let connection = maybe_skip_kafka_integration!();
336+
let test_cfg = maybe_skip_kafka_integration!();
318337
let topic_name = random_topic_name();
319338
let n_partitions = 1;
320339

321-
let client = ClientBuilder::new(connection.clone())
340+
let client = ClientBuilder::new(test_cfg.bootstrap_brokers.clone())
322341
.build()
323342
.await
324343
.unwrap();
@@ -382,10 +401,13 @@ async fn test_get_offset() {
382401
async fn test_produce_consume_size_cutoff() {
383402
maybe_start_logging();
384403

385-
let connection = maybe_skip_kafka_integration!();
404+
let test_cfg = maybe_skip_kafka_integration!();
386405
let topic_name = random_topic_name();
387406

388-
let client = ClientBuilder::new(connection).build().await.unwrap();
407+
let client = ClientBuilder::new(test_cfg.bootstrap_brokers)
408+
.build()
409+
.await
410+
.unwrap();
389411
let controller_client = client.controller_client().unwrap();
390412
controller_client
391413
.create_topic(&topic_name, 1, 1, 5_000)
@@ -460,10 +482,13 @@ async fn test_produce_consume_size_cutoff() {
460482
async fn test_consume_midbatch() {
461483
maybe_start_logging();
462484

463-
let connection = maybe_skip_kafka_integration!();
485+
let test_cfg = maybe_skip_kafka_integration!();
464486
let topic_name = random_topic_name();
465487

466-
let client = ClientBuilder::new(connection).build().await.unwrap();
488+
let client = ClientBuilder::new(test_cfg.bootstrap_brokers)
489+
.build()
490+
.await
491+
.unwrap();
467492
let controller_client = client.controller_client().unwrap();
468493
controller_client
469494
.create_topic(&topic_name, 1, 1, 5_000)
@@ -508,10 +533,13 @@ async fn test_consume_midbatch() {
508533
async fn test_delete_records() {
509534
maybe_start_logging();
510535

511-
let connection = maybe_skip_kafka_integration!();
536+
let test_cfg = maybe_skip_kafka_integration!(delete);
512537
let topic_name = random_topic_name();
513538

514-
let client = ClientBuilder::new(connection).build().await.unwrap();
539+
let client = ClientBuilder::new(test_cfg.bootstrap_brokers)
540+
.build()
541+
.await
542+
.unwrap();
515543
let controller_client = client.controller_client().unwrap();
516544
controller_client
517545
.create_topic(&topic_name, 1, 1, 5_000)
@@ -555,7 +583,10 @@ async fn test_delete_records() {
555583
let offset_4 = offsets[0];
556584

557585
// delete from the middle of the 2nd batch
558-
maybe_skip_delete!(partition_client, offset_3);
586+
partition_client
587+
.delete_records(offset_3, 1_000)
588+
.await
589+
.unwrap();
559590

560591
// fetching data before the record fails
561592
let err = partition_client

0 commit comments

Comments
 (0)