test(ecstore): cover non-zero offset shard span reads (#2677)

Co-authored-by: houseme <housemecn@gmail.com>
This commit is contained in:
安正超
2026-04-25 10:31:28 +08:00
committed by GitHub
parent b96ccfd110
commit 80413e0f8e

View File

@@ -5663,6 +5663,100 @@ mod tests {
assert_eq!(complete_part_checksum(&part, full_object_crc32), Some(Some("AAAAAA==".to_string())));
}
#[tokio::test]
async fn range_reads_use_shard_span_length_for_non_zero_offsets() {
    use tokio::io::AsyncReadExt;
    use uuid::Uuid;

    // The payload spans several erasure block sizes, and the requested window
    // begins well past the first shard span so the non-zero-offset path is hit.
    let offset = 2 * 1024 * 1024 + 17;
    let length = 512 * 1024;
    let object_bytes = vec![b'x'; 3 * 1024 * 1024 + 1234];

    // Back the disk with a throwaway directory.
    let temp_dir = tempfile::tempdir().expect("tempdir should be created");
    let endpoint =
        Endpoint::try_from(temp_dir.path().to_str().expect("tempdir path should be utf8")).expect("endpoint should parse");
    let disk = new_disk(
        &endpoint,
        &DiskOption {
            cleanup: false,
            health_check: false,
        },
    )
    .await
    .expect("disk should be created");

    let bucket = "bucket";
    let object = "object";
    disk.make_volume(bucket).await.expect("bucket should be created");

    // Single-part object metadata covering the full payload.
    let data_dir = Uuid::new_v4();
    let mut file_info = FileInfo::new(&format!("{bucket}/{object}"), 1, 0);
    file_info.data_dir = Some(data_dir);
    file_info.size = object_bytes.len() as i64;
    file_info.add_object_part(1, String::new(), object_bytes.len(), None, object_bytes.len() as i64, None, None);

    let coder = erasure_coding::Erasure::new_with_options(
        file_info.erasure.data_blocks,
        file_info.erasure.parity_blocks,
        file_info.erasure.block_size,
        file_info.uses_legacy_checksum,
    );

    // Bitrot-encode the payload shard by shard and persist it on the disk.
    let part_path = format!("{object}/{data_dir}/part.1");
    let csum = file_info.erasure.get_checksum_info(1);
    let mut shard_writer = create_bitrot_writer(
        true,
        None,
        bucket,
        &part_path,
        object_bytes.len() as i64,
        coder.shard_size(),
        csum.algorithm.clone(),
    )
    .await
    .expect("bitrot writer should be created");
    for piece in object_bytes.chunks(coder.shard_size()) {
        shard_writer.write(piece).await.expect("payload chunk should be written");
    }
    let inline = shard_writer.into_inline_data().expect("bitrot encoded data should exist");
    disk.write_all(bucket, &part_path, Bytes::from(inline))
        .await
        .expect("encoded shard should be stored");

    // Stream the ranged read through an in-memory duplex pipe while a spawned
    // task drives the erasure-set read path.
    let file_infos = vec![file_info.clone()];
    let online_disks = vec![Some(disk.clone())];
    let (mut pipe_rx, mut pipe_tx) = tokio::io::duplex(length * 2);
    let fetch = tokio::spawn(async move {
        SetDisks::get_object_with_fileinfo(
            bucket,
            object,
            offset,
            length as i64,
            &mut pipe_tx,
            file_info,
            file_infos,
            &online_disks,
            0,
            0,
            true,
        )
        .await
    });

    let mut received = Vec::new();
    pipe_rx.read_to_end(&mut received).await.expect("range bytes should be readable");
    fetch
        .await
        .expect("read task should complete")
        .expect("range read should succeed");

    // The streamed bytes must match exactly the requested slice of the payload.
    assert_eq!(received, object_bytes[offset..offset + length]);
}
#[test]
fn parts_after_marker_uses_marker_position() {
let part_numbers = (1..=1002).collect::<Vec<_>>();