12 changes: 6 additions & 6 deletions crates/fluss/src/client/table/log_fetch_buffer.rs
@@ -657,13 +657,13 @@ mod tests {
     use std::sync::Arc;
     use std::time::Duration;
 
-    fn test_read_context() -> ReadContext {
+    fn test_read_context() -> Result<ReadContext> {
         let row_type = RowType::new(vec![DataField::new(
             "id".to_string(),
             DataTypes::int(),
             None,
         )]);
-        ReadContext::new(to_arrow_schema(&row_type), false)
+        Ok(ReadContext::new(to_arrow_schema(&row_type)?, false))
     }
 
     struct ErrorPendingFetch {
@@ -689,7 +689,7 @@ mod tests {
 
     #[tokio::test]
     async fn await_not_empty_returns_wakeup_error() {
-        let buffer = LogFetchBuffer::new(test_read_context());
+        let buffer = LogFetchBuffer::new(test_read_context().unwrap());
         buffer.wakeup();
 
         let result = buffer.await_not_empty(Duration::from_millis(10)).await;
@@ -698,7 +698,7 @@
 
     #[tokio::test]
     async fn await_not_empty_returns_pending_error() {
-        let buffer = LogFetchBuffer::new(test_read_context());
+        let buffer = LogFetchBuffer::new(test_read_context().unwrap());
         let table_bucket = TableBucket::new(1, 0);
         buffer.pend(Box::new(ErrorPendingFetch {
             table_bucket: table_bucket.clone(),
@@ -728,7 +728,7 @@ mod tests {
                 compression_type: ArrowCompressionType::None,
                 compression_level: DEFAULT_NON_ZSTD_COMPRESSION_LEVEL,
             },
-        );
+        )?;
 
         let mut row = GenericRow::new();
         row.set_field(0, 1_i32);
@@ -738,7 +738,7 @@
 
         let data = builder.build()?;
         let log_records = LogRecordsBatches::new(data.clone());
-        let read_context = ReadContext::new(to_arrow_schema(&row_type), false);
+        let read_context = ReadContext::new(to_arrow_schema(&row_type)?, false);
         let mut fetch = DefaultCompletedFetch::new(
             TableBucket::new(1, 0),
             log_records,
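The log_fetch_buffer.rs change above shows the core pattern of this PR: a helper that used to return a value directly now returns Result, and its tests unwrap at the call site. A minimal, self-contained sketch of that shape follows; Schema, ReadContext, and to_arrow_schema here are illustrative stand-ins, not the crate's real types.

// Illustrative stand-ins only: Schema, ReadContext, and to_arrow_schema
// are not the crate's real types.
#[derive(Debug)]
struct Schema;

#[derive(Debug)]
struct ReadContext {
    schema: Schema,
    projected: bool,
}

// Models a schema conversion that can fail, like the now-fallible
// to_arrow_schema in this PR.
fn to_arrow_schema(valid: bool) -> Result<Schema, String> {
    if valid {
        Ok(Schema)
    } else {
        Err("unsupported data type".to_string())
    }
}

// The helper returns Result, so `?` forwards a conversion failure to the
// caller instead of panicking inside the helper.
fn test_read_context() -> Result<ReadContext, String> {
    let schema = to_arrow_schema(true)?;
    Ok(ReadContext { schema, projected: false })
}

fn main() {
    // Callers that cannot themselves return Result unwrap at the call
    // site, mirroring `test_read_context().unwrap()` in the diff.
    let ctx = test_read_context().unwrap();
    println!("{ctx:?}");
}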
8 changes: 4 additions & 4 deletions crates/fluss/src/client/table/scanner.rs
@@ -470,7 +470,7 @@ impl LogFetcher {
         log_scanner_status: Arc<LogScannerStatus>,
         projected_fields: Option<Vec<usize>>,
     ) -> Result<Self> {
-        let full_arrow_schema = to_arrow_schema(table_info.get_row_type());
+        let full_arrow_schema = to_arrow_schema(table_info.get_row_type())?;
         let read_context =
             Self::create_read_context(full_arrow_schema.clone(), projected_fields.clone(), false)?;
         let remote_read_context =
@@ -1445,7 +1445,7 @@ mod tests {
                 compression_type: ArrowCompressionType::None,
                 compression_level: DEFAULT_NON_ZSTD_COMPRESSION_LEVEL,
             },
-        );
+        )?;
         let record = WriteRecord::for_append(
             table_path,
             1,
@@ -1477,7 +1477,7 @@
 
         let data = build_records(&table_info, Arc::new(table_path))?;
         let log_records = LogRecordsBatches::new(data.clone());
-        let read_context = ReadContext::new(to_arrow_schema(table_info.get_row_type()), false);
+        let read_context = ReadContext::new(to_arrow_schema(table_info.get_row_type())?, false);
         let completed =
             DefaultCompletedFetch::new(bucket.clone(), log_records, data.len(), read_context, 0, 0);
         fetcher.log_fetch_buffer.add(Box::new(completed));
@@ -1506,7 +1506,7 @@
         let bucket = TableBucket::new(1, 0);
         let data = build_records(&table_info, Arc::new(table_path))?;
         let log_records = LogRecordsBatches::new(data.clone());
-        let read_context = ReadContext::new(to_arrow_schema(table_info.get_row_type()), false);
+        let read_context = ReadContext::new(to_arrow_schema(table_info.get_row_type())?, false);
         let mut completed: Box<dyn CompletedFetch> = Box::new(DefaultCompletedFetch::new(
             bucket,
             log_records,
2 changes: 1 addition & 1 deletion crates/fluss/src/client/write/accumulator.rs
@@ -112,7 +112,7 @@ impl RecordAccumulator {
                 bucket_id,
                 current_time_ms(),
                 matches!(&record.record, Record::Log(LogWriteRecord::RecordBatch(_))),
-            )),
+            )?),
             Record::Kv(kv_record) => Kv(KvWriteBatch::new(
                 self.batch_id.fetch_add(1, Ordering::Relaxed),
                 table_path.as_ref().clone(),
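The accumulator.rs change applies the same propagation inside a match arm: ArrowLogWriteBatch::new now returns Result, and the `?` inside the arm returns the error from the enclosing function. A hedged sketch of that shape, with Record, WriteBatch, and new_log_batch as hypothetical stand-ins:

// Hypothetical stand-ins: Record, WriteBatch, and new_log_batch model
// the accumulator's types without reproducing them.
enum Record {
    Log(i32),
    Kv(String),
}

enum WriteBatch {
    Log(i32),
    Kv(String),
}

// A fallible constructor, like ArrowLogWriteBatch::new after this PR.
fn new_log_batch(batch_id: i32) -> Result<i32, String> {
    if batch_id >= 0 {
        Ok(batch_id)
    } else {
        Err("invalid batch id".to_string())
    }
}

fn build_batch(record: Record) -> Result<WriteBatch, String> {
    // `?` inside the arm short-circuits the whole match and returns the
    // constructor's error from build_batch.
    let batch = match record {
        Record::Log(id) => WriteBatch::Log(new_log_batch(id)?),
        Record::Kv(key) => WriteBatch::Kv(key),
    };
    Ok(batch)
}

fn main() {
    assert!(build_batch(Record::Log(1)).is_ok());
    assert!(build_batch(Record::Kv("k".to_string())).is_ok());
    assert!(build_batch(Record::Log(-1)).is_err());
}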
8 changes: 4 additions & 4 deletions crates/fluss/src/client/write/batch.rs
@@ -197,18 +197,18 @@ impl ArrowLogWriteBatch {
         bucket_id: BucketId,
         create_ms: i64,
         to_append_record_batch: bool,
-    ) -> Self {
+    ) -> Result<Self> {
         let base = InnerWriteBatch::new(batch_id, table_path, create_ms, bucket_id);
-        Self {
+        Ok(Self {
             write_batch: base,
             arrow_builder: MemoryLogRecordsArrowBuilder::new(
                 schema_id,
                 row_type,
                 to_append_record_batch,
                 arrow_compression_info,
-            ),
+            )?,
             built_records: None,
-        }
+        })
     }
 
     pub fn batch_id(&self) -> i64 {
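The batch.rs change is the constructor side of the refactor: the return type moves from Self to Result<Self>, the struct literal is wrapped in Ok(..), and the nested fallible builder call gets `?`. A minimal sketch under those assumptions; Builder and Batch stand in for MemoryLogRecordsArrowBuilder and ArrowLogWriteBatch.

// Hypothetical stand-ins: Builder and Batch are not the crate's types.
struct Builder;

impl Builder {
    // The nested builder is where the fallible work (e.g. validating a
    // compression config) would happen.
    fn new(compression_supported: bool) -> Result<Builder, String> {
        if compression_supported {
            Ok(Builder)
        } else {
            Err("unsupported compression".to_string())
        }
    }
}

struct Batch {
    builder: Builder,
    built_records: Option<Vec<u8>>,
}

impl Batch {
    // Previously `-> Self`; the body is now wrapped in Ok(Self { .. })
    // and the inner constructor call gets `?` so its error reaches the
    // caller instead of panicking at construction time.
    fn new(compression_supported: bool) -> Result<Batch, String> {
        Ok(Batch {
            builder: Builder::new(compression_supported)?,
            built_records: None,
        })
    }
}

fn main() {
    assert!(Batch::new(true).is_ok());
    assert!(Batch::new(false).is_err());
}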