Expected Behavior
Multipart uploads with FULL_OBJECT CRC32 should work
Current Behavior
Multipart uploads with FULL_OBJECT CRC32 don't work with the current MinIO release.
They do work with real AWS S3.
Steps to Reproduce (for bugs)
Run the following script:
import boto3
import zlib
import base64
import tempfile
import os
from botocore.exceptions import ClientError
# Size of each multipart part; also the read size per upload_part call.
CHUNK_SIZE = 8 * 1024 * 1024 # 8 MB
# Region passed to boto3; us-east-1 requires no LocationConstraint on create.
AWS_REGION = "us-east-1"
# Target bucket; created on first run if it does not exist.
BUCKET_NAME = "your-unique-bucket-name-2039750257079"
def crc32_to_base64(crc):
    """Encode a CRC32 value as S3 expects: 4 big-endian bytes, base64'd.

    The mask keeps only the low 32 bits, so signed or over-wide inputs
    (e.g. a raw zlib.crc32 result on older Pythons) are normalized first.
    """
    masked = crc & 0xFFFFFFFF
    raw = masked.to_bytes(4, "big")
    return base64.b64encode(raw).decode("utf-8")
def create_s3_client():
    """Build a boto3 S3 client pointed at the local MinIO endpoint.

    Credentials are MinIO's default minioadmin/minioadmin pair; the
    endpoint override is what routes SDK calls to MinIO instead of AWS.
    """
    return boto3.client(
        "s3",
        region_name=AWS_REGION,
        endpoint_url="http://localhost:9000",
        aws_access_key_id="minioadmin",
        aws_secret_access_key="minioadmin",
    )
def create_bucket_if_not_exists(s3, bucket_name):
    """Create *bucket_name* unless a HEAD request shows it already exists.

    Any ClientError from head_bucket (404, 403, ...) is treated as
    "missing" and triggers a create attempt.
    """
    try:
        s3.head_bucket(Bucket=bucket_name)
    except ClientError:
        print(f"Creating bucket: {bucket_name}")
        kwargs = {"Bucket": bucket_name}
        # us-east-1 is the one region that must NOT send a LocationConstraint.
        if AWS_REGION != "us-east-1":
            kwargs["CreateBucketConfiguration"] = {
                "LocationConstraint": AWS_REGION
            }
        s3.create_bucket(**kwargs)
    else:
        print(f"Bucket already exists: {bucket_name}")
def create_temp_file():
    """Write a 3 * CHUNK_SIZE file of 'A' bytes and return its path.

    delete=False keeps the file around after close so the caller can
    upload it and remove it explicitly afterwards.
    """
    block = b"A" * (1024 * 1024)  # write in 1 MB blocks
    remaining = CHUNK_SIZE * 3  # 3 parts
    with tempfile.NamedTemporaryFile(delete=False) as tmp:
        while remaining > 0:
            n = min(len(block), remaining)
            tmp.write(block[:n])
            remaining -= n
    return tmp.name
def multipart_upload(s3, file_path, bucket, key):
    """Multipart-upload *file_path* to bucket/key with a FULL_OBJECT CRC32.

    The CRC32 of the whole file is accumulated locally chunk by chunk
    (zlib.crc32 with a running seed) and sent once at
    complete_multipart_upload time as ChecksumCRC32 — no per-part
    checksums are attached to the Parts list.  On any failure the
    multipart upload is aborted before re-raising.
    """
    resp = s3.create_multipart_upload(
        Bucket=bucket,
        Key=key,
        # Declare up front that the final checksum covers the full object,
        # not a composite of per-part checksums.
        ChecksumAlgorithm="CRC32",
        ChecksumType="FULL_OBJECT",
    )
    upload_id = resp["UploadId"]
    parts = []
    full_crc32 = 0  # running CRC32 seed across all chunks
    try:
        with open(file_path, "rb") as f:
            part_number = 1
            while True:
                chunk = f.read(CHUNK_SIZE)
                if not chunk:
                    break
                # Chain the CRC: passing the previous value as the seed
                # yields the CRC32 of the concatenated data so far.
                full_crc32 = zlib.crc32(chunk, full_crc32)
                response = s3.upload_part(
                    Bucket=bucket,
                    Key=key,
                    PartNumber=part_number,
                    UploadId=upload_id,
                    Body=chunk
                )
                # Only ETag + PartNumber are recorded; this is the shape
                # that triggers InvalidPart on MinIO but works on AWS S3.
                parts.append({
                    "ETag": response["ETag"],
                    "PartNumber": part_number
                })
                print(f"Uploaded part {part_number}")
                part_number += 1
        full_crc32_b64 = crc32_to_base64(full_crc32)
        result = s3.complete_multipart_upload(
            Bucket=bucket,
            Key=key,
            UploadId=upload_id,
            MultipartUpload={"Parts": parts},
            # Full-object checksum computed client-side, base64-encoded.
            ChecksumCRC32=full_crc32_b64,
            ChecksumType="FULL_OBJECT",
        )
        print("Upload complete ✅")
        print("S3 checksum:", result.get("ChecksumCRC32"))
    except Exception as e:
        print("Error:", e)
        # Abort so the incomplete upload does not linger server-side.
        s3.abort_multipart_upload(
            Bucket=bucket,
            Key=key,
            UploadId=upload_id
        )
        raise
if __name__ == "__main__":
    s3 = create_s3_client()

    # 1. Ensure bucket exists
    create_bucket_if_not_exists(s3, BUCKET_NAME)

    # 2. Create temp file
    path = create_temp_file()
    try:
        # 3. Upload
        multipart_upload(
            s3,
            file_path=path,
            bucket=BUCKET_NAME,
            key="uploads/temp-example.txt",
        )
    finally:
        # Always clean up the local fixture, even when the upload fails.
        os.remove(path)
        print("Temp file deleted")
It works with S3.
With Minio, you get:
botocore.exceptions.ClientError: An error occurred (InvalidPart) when calling the CompleteMultipartUpload operation: One or more of the specified parts could not be found. The part may not have been uploaded, or the specified entity tag may not match the part's entity tag.
Note: the same happens with the S3 Java SDK.
Regression
No
Your Environment
- Version used (`minio --version`): RELEASE.2026-04-17T00-00-00Z (go1.26.2 linux/arm64)
- Server setup and configuration:
- Operating System and version (
uname -a):
Expected Behavior
Multipart uploads with FULL_OBJECT CRC32 should work
Current Behavior
Multipart uploads with FULL_OBJECT CRC32 don't work with current Minio.
They do work with real S3.
Steps to Reproduce (for bugs)
Run the following script:
It works with S3.
With Minio, you get:
Note: the same happens with the S3 Java SDK.
Regression
No
Your Environment
- Version used (`minio --version`): RELEASE.2026-04-17T00-00-00Z (go1.26.2 linux/arm64)
- Operating System and version (`uname -a`): (not provided)