datafusion_physical_plan/coalesce/mod.rs

// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.

use arrow::array::RecordBatch;
use arrow::compute::BatchCoalescer;
use arrow::datatypes::SchemaRef;
use datafusion_common::{internal_err, Result};

/// Concatenate multiple [`RecordBatch`]es and apply a limit
///
/// See [`BatchCoalescer`] for more details on how this works.
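///
/// # Example
///
/// A minimal usage sketch (illustrative only: `schema`, `input_batches`, and
/// `output` are assumed to exist in the caller's scope):
///
/// ```ignore
/// // Build ~4096 row output batches, stopping after at most 100 rows
/// let mut coalescer = LimitedBatchCoalescer::new(schema, 4096, Some(100));
/// for batch in input_batches {
///     // Stop pushing once the limit has been reached
///     if coalescer.push_batch(batch)? == PushBatchStatus::LimitReached {
///         break;
///     }
///     // Drain any batches that have reached the target size
///     while let Some(completed) = coalescer.next_completed_batch() {
///         output.push(completed);
///     }
/// }
/// // Flush any rows still buffered
/// coalescer.finish()?;
/// while let Some(completed) = coalescer.next_completed_batch() {
///     output.push(completed);
/// }
/// ```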
#[derive(Debug)]
pub struct LimitedBatchCoalescer {
    /// The arrow structure that builds the output batches
    inner: BatchCoalescer,
    /// Total number of rows pushed into the coalescer so far
    /// (these rows count towards the `fetch` limit)
    total_rows: usize,
    /// Limit: maximum number of rows to fetch, `None` means fetch all rows
    fetch: Option<usize>,
    /// Indicates if the coalescer is finished
    finished: bool,
}

/// Status returned by [`LimitedBatchCoalescer::push_batch`]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum PushBatchStatus {
    /// The limit has **not** been reached, and more batches can be pushed
    Continue,
    /// The limit **has** been reached after processing this batch.
    /// The caller should call [`LimitedBatchCoalescer::finish`]
    /// to flush any buffered rows and stop pushing more batches.
    LimitReached,
}

impl LimitedBatchCoalescer {
    /// Create a new `LimitedBatchCoalescer`
    ///
    /// # Arguments
    /// - `schema` - the schema of the output batches
    /// - `target_batch_size` - the minimum number of rows for each
    ///   output batch (until the limit is reached)
    /// - `fetch` - the maximum number of rows to fetch, `None` means fetch all rows
    pub fn new(
        schema: SchemaRef,
        target_batch_size: usize,
        fetch: Option<usize>,
    ) -> Self {
        Self {
            inner: BatchCoalescer::new(schema, target_batch_size)
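                // Half the target size is used as the cutoff for arrow's
                // "biggest coalesce batch size" option (assumed here to let
                // larger input batches pass through without re-copying).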
                .with_biggest_coalesce_batch_size(Some(target_batch_size / 2)),
            total_rows: 0,
            fetch,
            finished: false,
        }
    }

    /// Return the schema of the output batches
    pub fn schema(&self) -> SchemaRef {
        self.inner.schema()
    }

    /// Pushes the next [`RecordBatch`] into the coalescer and returns its status.
    ///
    /// # Arguments
    /// * `batch` - The [`RecordBatch`] to append.
    ///
    /// # Returns
    /// * [`PushBatchStatus::Continue`] - More batches can still be pushed.
    /// * [`PushBatchStatus::LimitReached`] - The row limit was reached after processing
    ///   this batch. The caller should call [`Self::finish`] before retrieving the
    ///   remaining buffered batches.
    ///
    /// # Errors
    /// Returns an error if called after [`Self::finish`] or if the internal push
    /// operation fails.
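    ///
    /// # Example
    ///
    /// A sketch of how the returned status is typically handled (illustrative
    /// only; `coalescer` and `batch` are assumed to exist):
    ///
    /// ```ignore
    /// match coalescer.push_batch(batch)? {
    ///     // Below the limit: keep pushing input batches
    ///     PushBatchStatus::Continue => {}
    ///     // Limit reached: flush buffered rows and stop pushing
    ///     PushBatchStatus::LimitReached => coalescer.finish()?,
    /// }
    /// ```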
    pub fn push_batch(&mut self, batch: RecordBatch) -> Result<PushBatchStatus> {
        if self.finished {
            return internal_err!(
                "LimitedBatchCoalescer: cannot push batch after finish"
            );
        }

        // if we are at the limit, return LimitReached
        if let Some(fetch) = self.fetch {
            // limit previously reached
            if self.total_rows >= fetch {
                return Ok(PushBatchStatus::LimitReached);
            }

            // limit now reached
            if self.total_rows + batch.num_rows() >= fetch {
                // Limit is reached
                let remaining_rows = fetch - self.total_rows;
                debug_assert!(remaining_rows > 0);

                let batch_head = batch.slice(0, remaining_rows);
                self.total_rows += batch_head.num_rows();
                self.inner.push_batch(batch_head)?;
                return Ok(PushBatchStatus::LimitReached);
            }
        }

        // Limit not reached, push the entire batch
        self.total_rows += batch.num_rows();
        self.inner.push_batch(batch)?;

        Ok(PushBatchStatus::Continue)
    }

    /// Return true if there is no data buffered
    pub fn is_empty(&self) -> bool {
        self.inner.is_empty()
    }

    /// Complete the current buffered batch and finish the coalescer
    ///
    /// Any subsequent calls to `push_batch()` will return an Err
    pub fn finish(&mut self) -> Result<()> {
        self.inner.finish_buffered_batch()?;
        self.finished = true;
        Ok(())
    }

    /// Return the next completed batch, if any
    pub fn next_completed_batch(&mut self) -> Option<RecordBatch> {
        self.inner.next_completed_batch()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::ops::Range;
    use std::sync::Arc;

    use arrow::array::UInt32Array;
    use arrow::compute::concat_batches;
    use arrow::datatypes::{DataType, Field, Schema};

    #[test]
    fn test_coalesce() {
        let batch = uint32_batch(0..8);
        Test::new()
            .with_batches(std::iter::repeat_n(batch, 10))
            // expected output is batches of exactly 21 rows (except for the final batch)
            .with_target_batch_size(21)
            .with_expected_output_sizes(vec![21, 21, 21, 17])
            .run()
    }

    #[test]
    fn test_coalesce_with_fetch_larger_than_input_size() {
        let batch = uint32_batch(0..8);
        Test::new()
            .with_batches(std::iter::repeat_n(batch, 10))
            // input is 10 batches x 8 rows (80 rows) with fetch limit of 100
            // expected to behave the same as `test_coalesce`
            .with_target_batch_size(21)
            .with_fetch(Some(100))
            .with_expected_output_sizes(vec![21, 21, 21, 17])
            .run();
    }

    #[test]
    fn test_coalesce_with_fetch_less_than_input_size() {
        let batch = uint32_batch(0..8);
        Test::new()
            .with_batches(std::iter::repeat_n(batch, 10))
            // input is 10 batches x 8 rows (80 rows) with fetch limit of 50
            .with_target_batch_size(21)
            .with_fetch(Some(50))
            .with_expected_output_sizes(vec![21, 21, 8])
            .run();
    }

    #[test]
    fn test_coalesce_with_fetch_less_than_target_and_no_remaining_rows() {
        let batch = uint32_batch(0..8);
        Test::new()
            .with_batches(std::iter::repeat_n(batch, 10))
            // input is 10 batches x 8 rows (80 rows) with fetch limit of 48
            .with_target_batch_size(24)
            .with_fetch(Some(48))
            .with_expected_output_sizes(vec![24, 24])
            .run();
    }

    #[test]
    fn test_coalesce_with_fetch_less_than_target_batch_size() {
        let batch = uint32_batch(0..8);
        Test::new()
            .with_batches(std::iter::repeat_n(batch, 10))
            // input is 10 batches x 8 rows (80 rows) with fetch limit of 10
            .with_target_batch_size(21)
            .with_fetch(Some(10))
            .with_expected_output_sizes(vec![10])
            .run();
    }

    #[test]
    fn test_coalesce_single_large_batch_over_fetch() {
        let large_batch = uint32_batch(0..100);
        Test::new()
            .with_batch(large_batch)
            .with_target_batch_size(20)
            .with_fetch(Some(7))
            .with_expected_output_sizes(vec![7])
            .run()
    }

    /// Test for [`LimitedBatchCoalescer`]
    ///
    /// Pushes the input batches to the coalescer and verifies that the resulting
    /// batches have the expected number of rows and contents.
    #[derive(Debug, Clone, Default)]
    struct Test {
        /// Batches to feed to the coalescer. Tests must provide at least one
        /// input batch (its schema is used to build the coalescer)
        input_batches: Vec<RecordBatch>,
        /// Expected output sizes of the resulting batches
        expected_output_sizes: Vec<usize>,
        /// Target batch size
        target_batch_size: usize,
        /// Fetch (limit)
        fetch: Option<usize>,
    }

    impl Test {
        fn new() -> Self {
            Self::default()
        }

        /// Set the target batch size
        fn with_target_batch_size(mut self, target_batch_size: usize) -> Self {
            self.target_batch_size = target_batch_size;
            self
        }

        /// Set the fetch (limit)
        fn with_fetch(mut self, fetch: Option<usize>) -> Self {
            self.fetch = fetch;
            self
        }

        /// Append `batch` to the input batches
        fn with_batch(mut self, batch: RecordBatch) -> Self {
            self.input_batches.push(batch);
            self
        }

        /// Extends the input batches with `batches`
        fn with_batches(
            mut self,
            batches: impl IntoIterator<Item = RecordBatch>,
        ) -> Self {
            self.input_batches.extend(batches);
            self
        }

        /// Extends the expected output sizes with `sizes`
        fn with_expected_output_sizes(
            mut self,
            sizes: impl IntoIterator<Item = usize>,
        ) -> Self {
            self.expected_output_sizes.extend(sizes);
            self
        }

        /// Runs the test -- see documentation on [`Test`] for details
        fn run(self) {
            let Self {
                input_batches,
                target_batch_size,
                fetch,
                expected_output_sizes,
            } = self;

            let schema = input_batches[0].schema();

            // create a single large input batch for output comparison
            let single_input_batch = concat_batches(&schema, &input_batches).unwrap();

            let mut coalescer =
                LimitedBatchCoalescer::new(Arc::clone(&schema), target_batch_size, fetch);

            let mut output_batches = vec![];
            for batch in input_batches {
                match coalescer.push_batch(batch).unwrap() {
                    PushBatchStatus::Continue => {
                        // continue pushing batches
                    }
                    PushBatchStatus::LimitReached => {
                        break;
                    }
                }
            }
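            // This test drains completed batches only after `finish()`;
            // a streaming caller would typically also drain them between pushes.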
            coalescer.finish().unwrap();
            while let Some(batch) = coalescer.next_completed_batch() {
                output_batches.push(batch);
            }

            let actual_output_sizes: Vec<usize> =
                output_batches.iter().map(|b| b.num_rows()).collect();
            assert_eq!(
                expected_output_sizes, actual_output_sizes,
                "Unexpected number of rows in output batches\n\
                Expected\n{expected_output_sizes:#?}\nActual:{actual_output_sizes:#?}"
            );

            // make sure we got the expected number of output batches and content
            let mut starting_idx = 0;
            assert_eq!(expected_output_sizes.len(), output_batches.len());
            for (i, (expected_size, batch)) in
                expected_output_sizes.iter().zip(output_batches).enumerate()
            {
                assert_eq!(
                    *expected_size,
                    batch.num_rows(),
                    "Unexpected number of rows in Batch {i}"
                );

                // compare the contents of the batch (using `==` compares the
                // underlying memory layout too)
                let expected_batch =
                    single_input_batch.slice(starting_idx, *expected_size);
                let batch_strings = batch_to_pretty_strings(&batch);
                let expected_batch_strings = batch_to_pretty_strings(&expected_batch);
                let batch_strings = batch_strings.lines().collect::<Vec<_>>();
                let expected_batch_strings =
                    expected_batch_strings.lines().collect::<Vec<_>>();
                assert_eq!(
                    expected_batch_strings, batch_strings,
                    "Unexpected content in Batch {i}:\
                    \n\nExpected:\n{expected_batch_strings:#?}\n\nActual:\n{batch_strings:#?}"
                );
                starting_idx += *expected_size;
            }
        }
    }

    /// Return a batch of UInt32 with the specified range
    fn uint32_batch(range: Range<u32>) -> RecordBatch {
        let schema =
            Arc::new(Schema::new(vec![Field::new("c0", DataType::UInt32, false)]));

        RecordBatch::try_new(
            Arc::clone(&schema),
            vec![Arc::new(UInt32Array::from_iter_values(range))],
        )
        .unwrap()
    }

    fn batch_to_pretty_strings(batch: &RecordBatch) -> String {
        arrow::util::pretty::pretty_format_batches(std::slice::from_ref(batch))
            .unwrap()
            .to_string()
    }
}