@@ -68,6 +68,45 @@ async fn test_stream_consumer_start_at_0() {
     assert_stream_pending(&mut stream).await;
 }
 
+#[tokio::test]
+async fn test_stream_consumer_start_at_1() {
+    maybe_start_logging();
+
+    let connection = maybe_skip_kafka_integration!();
+    let client = ClientBuilder::new(vec![connection]).build().await.unwrap();
+    let controller_client = client.controller_client().await.unwrap();
+
+    let topic = random_topic_name();
+    controller_client
+        .create_topic(&topic, 1, 1, 5_000)
+        .await
+        .unwrap();
+
+    let record_1 = record(b"x");
+    let record_2 = record(b"y");
+
+    let partition_client = Arc::new(client.partition_client(&topic, 0).await.unwrap());
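+    // Both records are produced in a single batch, so they occupy offsets 0 and 1.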
+    partition_client
+        .produce(
+            vec![record_1.clone(), record_2.clone()],
+            Compression::NoCompression,
+        )
+        .await
+        .unwrap();
+
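+    // Start consuming at offset 1, i.e. at the second record.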
+    let mut stream = StreamConsumerBuilder::new(Arc::clone(&partition_client), StartOffset::At(1))
+        .with_max_wait_ms(50)
+        .build();
+
+    // Skips first record
+    let (record_and_offset, _watermark) =
+        assert_ok(timeout(Duration::from_millis(100), stream.next()).await);
+    assert_eq!(record_and_offset.record, record_2);
+
+    // No further records
+    assert_stream_pending(&mut stream).await;
+}
+
 #[tokio::test]
 async fn test_stream_consumer_offset_out_of_range() {
     maybe_start_logging();
@@ -179,13 +218,55 @@ async fn test_stream_consumer_start_at_earliest_empty() {
 
     // Get second record
     let (record_and_offset, _) =
-        assert_ok(timeout(Duration::from_millis(100), stream.next()).await);
+        assert_ok(timeout(Duration::from_millis(200), stream.next()).await);
     assert_eq!(record_and_offset.record, record);
 
     // No further records
     assert_stream_pending(&mut stream).await;
 }
 
+#[tokio::test]
+async fn test_stream_consumer_start_at_earliest_after_deletion() {
+    maybe_start_logging();
+
+    let connection = maybe_skip_kafka_integration!();
+    let client = ClientBuilder::new(vec![connection]).build().await.unwrap();
+    let controller_client = client.controller_client().await.unwrap();
+
+    let topic = random_topic_name();
+    controller_client
+        .create_topic(&topic, 1, 1, 5_000)
+        .await
+        .unwrap();
+
+    let record_1 = record(b"x");
+    let record_2 = record(b"y");
+
+    let partition_client = Arc::new(client.partition_client(&topic, 0).await.unwrap());
+    partition_client
+        .produce(
+            vec![record_1.clone(), record_2.clone()],
+            Compression::NoCompression,
+        )
+        .await
+        .unwrap();
+
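+    // Drop the first record (everything before offset 1); presumably the macro also
+    // skips this test when the broker setup does not support record deletion.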
+    maybe_skip_delete!(partition_client, 1);
+
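+    // Starting at the earliest offset should now yield the second record first.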
+    let mut stream =
+        StreamConsumerBuilder::new(Arc::clone(&partition_client), StartOffset::Earliest)
+            .with_max_wait_ms(50)
+            .build();
+
+    // First record skipped / deleted
+    let (record_and_offset, _) =
+        assert_ok(timeout(Duration::from_millis(100), stream.next()).await);
+    assert_eq!(record_and_offset.record, record_2);
+
+    // No further records
+    assert_stream_pending(&mut stream).await;
+}
+
 #[tokio::test]
 async fn test_stream_consumer_start_at_latest() {
     maybe_start_logging();