use crate::infer::InferError;
use crate::infer::InferStreamResponse;
use crate::validation::ValidGenerateRequest;
use nohash_hasher::{BuildNoHashHasher, IntMap};
use std::collections::VecDeque;
use text_generation_client::{Batch, Request};
use tokio::sync::oneshot;
use tokio::time::Instant;
use tracing::{info_span, instrument, Span};

/// Queue entry
#[derive(Debug)]
pub(crate) struct Entry {
    /// Request
    pub request: ValidGenerateRequest,
    /// Response sender to communicate between the Infer struct and the batching_task
    pub response_tx: flume::Sender<Result<InferStreamResponse, InferError>>,
    /// Span that will live as long as entry
    pub span: Span,
    /// Temporary span used as a guard when logging inference, wait times...
    pub temp_span: Option<Span>,
    /// Instant when this entry was queued
    pub queue_time: Instant,
    /// Instant when this entry was added to a batch
    pub batch_time: Option<Instant>,
}

/// Request Queue
#[derive(Debug, Clone)]
pub(crate) struct Queue {
    /// Channel to communicate with the background queue task
    queue_sender: flume::Sender<QueueCommand>,
}

impl Queue {
    pub(crate) fn new(requires_padding: bool) -> Self {
        // Create channel
        let (queue_sender, queue_receiver) = flume::unbounded();

        // Launch background queue task
        tokio::spawn(queue_task(requires_padding, queue_receiver));

        Self { queue_sender }
    }

    /// Append an entry to the queue
    #[instrument(skip_all)]
    pub(crate) fn append(&self, entry: Entry) {
        // Send append command to the background task managing the state
        // Unwrap is safe here
        self.queue_sender
            .send(QueueCommand::Append(entry, Span::current()))
            .unwrap();
    }

    // Get the next batch
    #[instrument(skip(self))]
    pub(crate) async fn next_batch(
        &self,
        min_size: Option<usize>,
        token_budget: u32,
    ) -> Option<NextBatch> {
        // Create response channel
        let (response_sender, response_receiver) = oneshot::channel();
        // Send next batch command to the background task managing the state
        // Unwrap is safe here
        self.queue_sender
            .send(QueueCommand::NextBatch {
                min_size,
                token_budget,
                response_sender,
                span: Span::current(),
            })
            .unwrap();
        // Await on response channel
        // Unwrap is safe here
        response_receiver.await.unwrap()
    }
}
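
// Usage sketch (illustrative, not part of the module): the `Queue` handle is
// cheap to clone because it only wraps the channel sender, so the HTTP side can
// append entries while the batching task concurrently asks for batches:
//
//     let queue = Queue::new(false);
//     queue.append(entry); // `entry` built by the Infer struct
//     if let Some((entries, batch, span)) = queue.next_batch(Some(2), 32).await {
//         // forward `batch` to the model shards; `entries` is keyed by request id
//     }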

// Background task responsible for the queue state
async fn queue_task(requires_padding: bool, receiver: flume::Receiver<QueueCommand>) {
    let mut state = State::new(requires_padding);

    while let Ok(cmd) = receiver.recv_async().await {
        match cmd {
            QueueCommand::Append(entry, span) => {
                span.in_scope(|| state.append(entry));
                metrics::increment_gauge!("tgi_queue_size", 1.0);
            }
            QueueCommand::NextBatch {
                min_size,
                token_budget,
                response_sender,
                span,
            } => span.in_scope(|| {
                let next_batch = state.next_batch(min_size, token_budget);
                response_sender.send(next_batch).unwrap_or(());
                metrics::gauge!("tgi_queue_size", state.entries.len() as f64);
            }),
        }
    }
}

/// Queue State
#[derive(Debug)]
struct State {
    /// Queue entries organized in a VecDeque
    entries: VecDeque<(u64, Entry)>,

    /// Id of the next entry
    next_id: u64,

    /// Id of the next batch
    next_batch_id: u64,

    /// Whether the model is using padding
    requires_padding: bool,
}

impl State {
    fn new(requires_padding: bool) -> Self {
        Self {
            entries: VecDeque::with_capacity(128),
            next_id: 0,
            next_batch_id: 0,
            requires_padding,
        }
    }

    /// Append an entry to the queue
    fn append(&mut self, mut entry: Entry) {
        // Create a span that will live as long as the entry is in the queue waiting to be batched
        let queue_span = info_span!(parent: &entry.span, "queued");
        entry.temp_span = Some(queue_span);

        // Push entry into the queue
        self.entries.push_back((self.next_id, entry));
        self.next_id += 1;
    }

    // Get the next batch
    fn next_batch(&mut self, min_size: Option<usize>, token_budget: u32) -> Option<NextBatch> {
        if self.entries.is_empty() {
            return None;
        }

        // Check if we have enough entries
        if let Some(min_size) = min_size {
            if self.entries.len() < min_size {
                return None;
            }
        }

        // Create span for this batch to add context to inference calls
        let next_batch_span = info_span!(parent: None, "batch", batch_size = tracing::field::Empty);
        next_batch_span.follows_from(&Span::current());

        let mut batch_requests = Vec::with_capacity(self.entries.len());
        let mut batch_entries =
            IntMap::with_capacity_and_hasher(self.entries.len(), BuildNoHashHasher::default());

        let mut max_input_length = 0;
        let mut prefill_tokens: u32 = 0;
        let mut max_decode_steps: u32 = u32::MAX;

        // Pop entries starting from the front of the queue
        while let Some((id, mut entry)) = self.entries.pop_front() {
            // Filter entries where the response receiver was dropped (== entries where the request
            // was dropped by the client)
            if entry.response_tx.is_disconnected() {
                metrics::increment_counter!("tgi_request_failure", "err" => "dropped");
                continue;
            }

            if self.requires_padding {
                // We pad to max input length in the Python shards
                // We need to take these padding tokens into the equation
                max_input_length = max_input_length.max(entry.request.input_length);
                prefill_tokens = (batch_requests.len() + 1) as u32 * max_input_length;
            } else {
                prefill_tokens += entry.request.input_length;
            }

            max_decode_steps =
                max_decode_steps.min(entry.request.stopping_parameters.max_new_tokens);

            let decode_tokens = max_decode_steps * (batch_requests.len() + 1) as u32;
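
            // Worked example of the budget check below (illustrative numbers):
            // with padding, 3 requests whose longest input is 20 tokens reserve
            // 3 * 20 = 60 prefill tokens, and if the smallest max_new_tokens in
            // the batch is 50, decode reserves 3 * 50 = 150 tokens, so the
            // candidate batch needs 60 + 150 = 210 tokens of budget.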

            if (prefill_tokens + decode_tokens) > token_budget {
                // Entry is over budget
                // Add it back to the front
                self.entries.push_front((id, entry));
                break;
            }

            // Create a new span to link the batch back to this entry
            let entry_batch_span = info_span!(parent: &entry.span, "infer");
            // Add relationships
            next_batch_span.follows_from(&entry_batch_span);
            entry_batch_span.follows_from(&next_batch_span);
            // Update entry
            entry.temp_span = Some(entry_batch_span);

            batch_requests.push(Request {
                id,
                inputs: entry.request.inputs.clone(),
                truncate: entry.request.truncate,
                parameters: Some(entry.request.parameters.clone()),
                stopping_parameters: Some(entry.request.stopping_parameters.clone()),
            });
            // Set batch_time
            entry.batch_time = Some(Instant::now());
            // Insert in batch_entries IntMap
            batch_entries.insert(id, entry);
        }

        // Empty batch
        if batch_requests.is_empty() {
            return None;
        }

        // Check if our batch is big enough
        if let Some(min_size) = min_size {
            // Batch is too small
            if batch_requests.len() < min_size {
                // Add back entries to the queue in the correct order
                for r in batch_requests.into_iter().rev() {
                    let id = r.id;
                    let entry = batch_entries.remove(&id).unwrap();
                    self.entries.push_front((id, entry));
                }

                return None;
            }
        }

        // Final batch size
        let size = batch_requests.len() as u32;
        next_batch_span.record("batch_size", size);

        let decode_tokens = size * max_decode_steps;

        let batch = Batch {
            id: self.next_batch_id,
            requests: batch_requests,
            size,
            max_tokens: (prefill_tokens + decode_tokens),
        };
        // Increment batch id
        self.next_batch_id += 1;

        metrics::histogram!("tgi_batch_next_size", batch.size as f64);

        Some((batch_entries, batch, next_batch_span))
    }
}
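
/// Entries selected for the batch keyed by request id, the protobuf `Batch` to
/// send to the shards, and the tracing span covering the whole batch.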
type NextBatch = (IntMap<u64, Entry>, Batch, Span);

#[derive(Debug)]
enum QueueCommand {
    Append(Entry, Span),
    NextBatch {
        min_size: Option<usize>,
        token_budget: u32,
        response_sender: oneshot::Sender<Option<NextBatch>>,
        span: Span,
    },
}

#[cfg(test)]
mod tests {
    use super::*;
    use text_generation_client::{NextTokenChooserParameters, StoppingCriteriaParameters};
    use tracing::info_span;

    fn default_entry() -> (
        Entry,
        flume::Receiver<Result<InferStreamResponse, InferError>>,
    ) {
        let (response_tx, receiver_tx) = flume::unbounded();

        let entry = Entry {
            request: ValidGenerateRequest {
                inputs: "".to_string(),
                input_length: 0,
                truncate: 0,
                parameters: NextTokenChooserParameters {
                    temperature: 0.0,
                    top_k: 0,
                    top_p: 0.0,
                    typical_p: 0.0,
                    do_sample: false,
                    seed: 0,
                    repetition_penalty: 0.0,
                    watermark: false,
                },
                stopping_parameters: StoppingCriteriaParameters {
                    ignore_eos_token: false,
                    max_new_tokens: 1,
                    stop_sequences: vec![],
                },
            },
            response_tx,
            span: info_span!("entry"),
            temp_span: None,
            queue_time: Instant::now(),
            batch_time: None,
        };
        (entry, receiver_tx)
    }

    #[test]
    fn test_append() {
        let mut state = State::new(false);
        let (entry, _guard) = default_entry();

        assert_eq!(state.next_id, 0);
        assert_eq!(state.entries.len(), 0);

        state.append(entry);

        assert_eq!(state.next_id, 1);
        assert_eq!(state.entries.len(), 1);
        let (id, _) = state.entries.remove(0).unwrap();
        assert_eq!(id, 0);
    }

    #[test]
    fn test_next_batch_empty() {
        let mut state = State::new(false);

        assert!(state.next_batch(None, 1).is_none());
        assert!(state.next_batch(Some(1), 1).is_none());
    }

    #[test]
    fn test_next_batch_min_size() {
        let mut state = State::new(false);
        let (entry1, _guard1) = default_entry();
        let (entry2, _guard2) = default_entry();
        state.append(entry1);
        state.append(entry2);

        let (entries, batch, _) = state.next_batch(None, 2).unwrap();
        assert_eq!(entries.len(), 2);
        assert!(entries.contains_key(&0));
        assert!(entries.contains_key(&1));
        assert!(entries.get(&0).unwrap().batch_time.is_some());
        assert!(entries.get(&1).unwrap().batch_time.is_some());
        assert_eq!(batch.id, 0);
        assert_eq!(batch.size, 2);

        assert_eq!(state.next_id, 2);
        assert_eq!(state.entries.len(), 0);
        assert_eq!(state.next_batch_id, 1);

        let (entry3, _guard3) = default_entry();
        state.append(entry3);

        assert!(state.next_batch(Some(2), 2).is_none());

        assert_eq!(state.next_id, 3);
        assert_eq!(state.entries.len(), 1);
        let (id, _) = state.entries.remove(0).unwrap();
        assert_eq!(id, 2);
    }

    #[test]
    fn test_next_batch_token_budget() {
        let mut state = State::new(false);
        let (entry1, _guard1) = default_entry();
        let (entry2, _guard2) = default_entry();
        state.append(entry1);
        state.append(entry2);

        let (entries, batch, _) = state.next_batch(None, 1).unwrap();
        assert_eq!(entries.len(), 1);
        assert!(entries.contains_key(&0));
        assert_eq!(batch.id, 0);
        assert_eq!(batch.size, 1);

        assert_eq!(state.next_id, 2);
        assert_eq!(state.entries.len(), 1);
        assert_eq!(state.next_batch_id, 1);

        let (entry3, _guard3) = default_entry();
        state.append(entry3);

        let (entries, batch, _) = state.next_batch(None, 3).unwrap();
        assert_eq!(entries.len(), 2);
        assert!(entries.contains_key(&1));
        assert!(entries.contains_key(&2));
        assert_eq!(batch.id, 1);
        assert_eq!(batch.size, 2);

        assert_eq!(state.next_id, 3);
        assert_eq!(state.entries.len(), 0);
        assert_eq!(state.next_batch_id, 2);
    }

    #[tokio::test]
    async fn test_queue_append() {
        let queue = Queue::new(false);
        let (entry, _guard) = default_entry();
        queue.append(entry);
    }

    #[tokio::test]
    async fn test_queue_next_batch_empty() {
        let queue = Queue::new(false);

        assert!(queue.next_batch(None, 1).await.is_none());
        assert!(queue.next_batch(Some(1), 1).await.is_none());
    }

    #[tokio::test]
    async fn test_queue_next_batch_min_size() {
        let queue = Queue::new(false);
        let (entry1, _guard1) = default_entry();
        let (entry2, _guard2) = default_entry();
        queue.append(entry1);
        queue.append(entry2);

        let (entries, batch, _) = queue.next_batch(None, 2).await.unwrap();
        assert_eq!(entries.len(), 2);
        assert!(entries.contains_key(&0));
        assert!(entries.contains_key(&1));
        assert!(entries.get(&0).unwrap().batch_time.is_some());
        assert!(entries.get(&1).unwrap().batch_time.is_some());
        assert_eq!(batch.id, 0);
        assert_eq!(batch.size, 2);

        let (entry3, _guard3) = default_entry();
        queue.append(entry3);

        assert!(queue.next_batch(Some(2), 2).await.is_none());
    }

    #[tokio::test]
    async fn test_queue_next_batch_token_budget() {
        let queue = Queue::new(false);
        let (entry1, _guard1) = default_entry();
        let (entry2, _guard2) = default_entry();
        queue.append(entry1);
        queue.append(entry2);

        let (entries, batch, _) = queue.next_batch(None, 1).await.unwrap();
        assert_eq!(entries.len(), 1);
        assert!(entries.contains_key(&0));
        assert_eq!(batch.id, 0);
        assert_eq!(batch.size, 1);

        let (entry3, _guard3) = default_entry();
        queue.append(entry3);

        let (entries, batch, _) = queue.next_batch(None, 3).await.unwrap();
        assert_eq!(entries.len(), 2);
        assert!(entries.contains_key(&1));
        assert!(entries.contains_key(&2));
        assert_eq!(batch.id, 1);
        assert_eq!(batch.size, 2);
    }

    #[tokio::test]
    async fn test_queue_next_batch_dropped_receiver() {
        let queue = Queue::new(false);
        let (entry, _) = default_entry();
        queue.append(entry);

        assert!(queue.next_batch(None, 1).await.is_none());
    }
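
    // Additional sketch (not from the original suite): with a zero token
    // budget even a single entry is over budget (1 decode token > 0), so no
    // batch is produced and the entry is pushed back to the front of the queue.
    #[test]
    fn test_next_batch_zero_budget() {
        let mut state = State::new(false);
        let (entry, _guard) = default_entry();
        state.append(entry);

        assert!(state.next_batch(None, 0).is_none());
        assert_eq!(state.entries.len(), 1);
    }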
|
2023-02-02 13:59:27 +00:00
|
|
|
}
|