From 5d761877386360e9465c8740fafa3b159b12d2d5 Mon Sep 17 00:00:00 2001 From: Russole <850905junior@gmail.com> Date: Sat, 14 Feb 2026 22:45:01 +0800 Subject: [PATCH 1/5] HDDS-14590. Extract MultipartKeyHandler for MPU object operations --- .../s3/endpoint/MultipartKeyHandler.java | 188 ++++++++++++++++++ .../ozone/s3/endpoint/ObjectEndpoint.java | 133 +------------ 2 files changed, 190 insertions(+), 131 deletions(-) create mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultipartKeyHandler.java diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultipartKeyHandler.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultipartKeyHandler.java new file mode 100644 index 00000000000..fe396558d61 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultipartKeyHandler.java @@ -0,0 +1,188 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.s3.endpoint; + +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.NO_SUCH_UPLOAD; +import static org.apache.hadoop.ozone.s3.exception.S3ErrorTable.newError; +import static org.apache.hadoop.ozone.s3.util.S3Consts.QueryParams; + +import java.io.IOException; +import java.time.Instant; +import javax.ws.rs.core.Response; +import javax.ws.rs.core.Response.Status; +import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.S3GAction; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts; +import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; +import org.apache.hadoop.ozone.s3.exception.OS3Exception; +import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; +import org.apache.hadoop.ozone.s3.util.S3StorageType; + +/** + * Handles MPU (Multipart Upload) non-POST operations for object key endpoint. 
+ */ +public class MultipartKeyHandler extends ObjectOperationHandler { + + @Override + Response handleGetRequest(ObjectEndpoint.ObjectRequestContext context, String keyPath) + throws IOException, OS3Exception { + + final String uploadId = queryParams().get(QueryParams.UPLOAD_ID); + if (uploadId == null) { + // not MPU -> let next handler run + return null; + } + + context.setAction(S3GAction.LIST_PARTS); + + final int maxParts = queryParams().getInt(QueryParams.MAX_PARTS, 1000); + final String partNumberMarker = queryParams().get(QueryParams.PART_NUMBER_MARKER); + final long startNanos = context.getStartNanos(); + final AuditLogger.PerformanceStringBuilder perf = context.getPerf(); + + try { + int partMarker = parsePartNumberMarker(partNumberMarker); + Response response = listParts(context.getBucket(), keyPath, uploadId, + partMarker, maxParts, perf); + long opLatencyNs = getMetrics().updateListPartsSuccessStats(startNanos); + getMetrics().updateListPartsSuccessStats(startNanos); + perf.appendOpLatencyNanos(opLatencyNs); + return response; + + } catch (IOException | RuntimeException ex) { + getMetrics().updateListPartsFailureStats(startNanos); + throw ex; + } + } + + @Override + Response handleDeleteRequest(ObjectEndpoint.ObjectRequestContext context, String keyPath) + throws IOException, OS3Exception { + + final String uploadId = queryParams().get(QueryParams.UPLOAD_ID); + if (StringUtils.isEmpty(uploadId)) { + // not MPU -> let next handler run + return null; + } + + context.setAction(S3GAction.ABORT_MULTIPART_UPLOAD); + + final long startNanos = context.getStartNanos(); + try { + Response r = abortMultipartUpload(context.getVolume(), + context.getBucketName(), keyPath, uploadId); + + getMetrics().updateAbortMultipartUploadSuccessStats(startNanos); + return r; + + } catch (IOException | RuntimeException ex) { + getMetrics().updateAbortMultipartUploadFailureStats(startNanos); + throw ex; + } + } + + /** + * Abort multipart upload request. + * @param bucket + * @param key + * @param uploadId + * @return Response + * @throws IOException + * @throws OS3Exception + */ + private Response abortMultipartUpload(OzoneVolume volume, String bucket, + String key, String uploadId) throws IOException, OS3Exception { + try { + getClientProtocol().abortMultipartUpload(volume.getName(), bucket, key, uploadId); + } catch (OMException ex) { + if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) { + throw newError(S3ErrorTable.NO_SUCH_UPLOAD, uploadId, ex); + } else if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND) { + throw newError(S3ErrorTable.NO_SUCH_BUCKET, bucket, ex); + } + throw ex; + } + return Response.status(Status.NO_CONTENT).build(); + } + + /** + * Returns response for the listParts request. 
+ * See: https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadListParts.html + * @param ozoneBucket + * @param key + * @param uploadId + * @param partNumberMarker + * @param maxParts + * @return + * @throws IOException + * @throws OS3Exception + */ + private Response listParts(OzoneBucket ozoneBucket, String key, String uploadId, + int partNumberMarker, int maxParts, + org.apache.hadoop.ozone.audit.AuditLogger.PerformanceStringBuilder perf) + throws IOException, OS3Exception { + + ListPartsResponse resp = new ListPartsResponse(); + String bucketName = ozoneBucket.getName(); + + try { + OzoneMultipartUploadPartListParts parts = + ozoneBucket.listParts(key, uploadId, partNumberMarker, maxParts); + + resp.setBucket(bucketName); + resp.setKey(key); + resp.setUploadID(uploadId); + resp.setMaxParts(maxParts); + resp.setPartNumberMarker(partNumberMarker); + resp.setTruncated(false); + + resp.setStorageClass(S3StorageType.fromReplicationConfig( + parts.getReplicationConfig()).toString()); + + if (parts.isTruncated()) { + resp.setTruncated(true); + resp.setNextPartNumberMarker(parts.getNextPartNumberMarker()); + } + + parts.getPartInfoList().forEach(p -> { + ListPartsResponse.Part part = new ListPartsResponse.Part(); + part.setPartNumber(p.getPartNumber()); + part.setETag(StringUtils.isNotEmpty(p.getETag()) ? p.getETag() : p.getPartName()); + part.setSize(p.getSize()); + part.setLastModified(Instant.ofEpochMilli(p.getModificationTime())); + resp.addPart(part); + }); + + } catch (OMException ex) { + if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) { + throw newError(NO_SUCH_UPLOAD, uploadId, ex); + } else if (isAccessDenied(ex)) { + throw newError(S3ErrorTable.ACCESS_DENIED, + bucketName + "/" + key + "/" + uploadId, ex); + } + throw ex; + } + + perf.appendCount(resp.getPartList().size()); + return Response.status(Status.OK).entity(resp).build(); + } +} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java index 91493c3b550..78e15fa7503 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java @@ -151,6 +151,7 @@ protected void init() { ObjectOperationHandler chain = ObjectOperationHandlerChain.newBuilder(this) .add(new ObjectAclHandler()) .add(new ObjectTaggingHandler()) + .add(new MultipartKeyHandler()) .add(this) .build(); handler = new AuditingObjectOperationHandler(chain); @@ -390,28 +391,13 @@ public Response get( Response handleGetRequest(ObjectRequestContext context, String keyPath) throws IOException, OS3Exception { - final int maxParts = queryParams().getInt(QueryParams.MAX_PARTS, 1000); final int partNumber = queryParams().getInt(QueryParams.PART_NUMBER, 0); - final String partNumberMarker = queryParams().get(QueryParams.PART_NUMBER_MARKER); - final String uploadId = queryParams().get(QueryParams.UPLOAD_ID); final long startNanos = context.getStartNanos(); final PerformanceStringBuilder perf = context.getPerf(); try { final String bucketName = context.getBucketName(); - final OzoneBucket bucket = context.getBucket(); - - if (uploadId != null) { - // list parts - context.setAction(S3GAction.LIST_PARTS); - - int partMarker = parsePartNumberMarker(partNumberMarker); - Response response = listParts(bucket, keyPath, uploadId, - partMarker, maxParts, perf); - - return response; - } 
context.setAction(S3GAction.GET_KEY); @@ -514,9 +500,6 @@ Response handleGetRequest(ObjectRequestContext context, String keyPath) return responseBuilder.build(); } catch (IOException | RuntimeException ex) { - if (uploadId == null) { - getMetrics().updateGetKeyFailureStats(startNanos); - } throw ex; } } @@ -628,37 +611,6 @@ Example of such app is Trino (through Hive connector). } } - /** - * Abort multipart upload request. - * @param bucket - * @param key - * @param uploadId - * @return Response - * @throws IOException - * @throws OS3Exception - */ - private Response abortMultipartUpload(OzoneVolume volume, String bucket, - String key, String uploadId) - throws IOException, OS3Exception { - long startNanos = Time.monotonicNowNanos(); - try { - getClientProtocol().abortMultipartUpload(volume.getName(), bucket, - key, uploadId); - } catch (OMException ex) { - if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) { - throw newError(S3ErrorTable.NO_SUCH_UPLOAD, uploadId, ex); - } else if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND) { - throw newError(S3ErrorTable.NO_SUCH_BUCKET, bucket, ex); - } - throw ex; - } - getMetrics().updateAbortMultipartUploadSuccessStats(startNanos); - return Response - .status(Status.NO_CONTENT) - .build(); - } - - /** * Delete a specific object from a bucket, if query param uploadId is * specified, this request is for abort multipart upload. @@ -704,30 +656,18 @@ public Response delete( Response handleDeleteRequest(ObjectRequestContext context, String keyPath) throws IOException, OS3Exception { - final String bucketName = context.getBucketName(); final long startNanos = context.startNanos; - final String uploadId = queryParams().get(QueryParams.UPLOAD_ID); try { OzoneVolume volume = context.getVolume(); - if (uploadId != null && !uploadId.isEmpty()) { - context.setAction(S3GAction.ABORT_MULTIPART_UPLOAD); - - return abortMultipartUpload(volume, bucketName, keyPath, uploadId); - } - getClientProtocol().deleteKey(volume.getName(), context.getBucketName(), keyPath, false); getMetrics().updateDeleteKeySuccessStats(startNanos); return Response.status(Status.NO_CONTENT).build(); } catch (Exception ex) { - if (uploadId != null && !uploadId.isEmpty()) { - getMetrics().updateAbortMultipartUploadFailureStats(startNanos); - } else { - getMetrics().updateDeleteKeyFailureStats(startNanos); - } + getMetrics().updateDeleteKeyFailureStats(startNanos); throw ex; } } @@ -1038,75 +978,6 @@ private Response createMultipartKey(OzoneVolume volume, OzoneBucket ozoneBucket, } } - /** - * Returns response for the listParts request. 
- * See: https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadListParts.html - * @param ozoneBucket - * @param key - * @param uploadID - * @param partNumberMarker - * @param maxParts - * @return - * @throws IOException - * @throws OS3Exception - */ - private Response listParts(OzoneBucket ozoneBucket, String key, String uploadID, - int partNumberMarker, int maxParts, PerformanceStringBuilder perf) - throws IOException, OS3Exception { - long startNanos = Time.monotonicNowNanos(); - ListPartsResponse listPartsResponse = new ListPartsResponse(); - String bucketName = ozoneBucket.getName(); - try { - OzoneMultipartUploadPartListParts ozoneMultipartUploadPartListParts = - ozoneBucket.listParts(key, uploadID, partNumberMarker, maxParts); - listPartsResponse.setBucket(bucketName); - listPartsResponse.setKey(key); - listPartsResponse.setUploadID(uploadID); - listPartsResponse.setMaxParts(maxParts); - listPartsResponse.setPartNumberMarker(partNumberMarker); - listPartsResponse.setTruncated(false); - - listPartsResponse.setStorageClass(S3StorageType.fromReplicationConfig( - ozoneMultipartUploadPartListParts.getReplicationConfig()).toString()); - - if (ozoneMultipartUploadPartListParts.isTruncated()) { - listPartsResponse.setTruncated( - ozoneMultipartUploadPartListParts.isTruncated()); - listPartsResponse.setNextPartNumberMarker( - ozoneMultipartUploadPartListParts.getNextPartNumberMarker()); - } - - ozoneMultipartUploadPartListParts.getPartInfoList().forEach(partInfo -> { - ListPartsResponse.Part part = new ListPartsResponse.Part(); - part.setPartNumber(partInfo.getPartNumber()); - // If the ETag field does not exist, use MPU part name for backward - // compatibility - part.setETag(StringUtils.isNotEmpty(partInfo.getETag()) ? - partInfo.getETag() : partInfo.getPartName()); - part.setSize(partInfo.getSize()); - part.setLastModified(Instant.ofEpochMilli( - partInfo.getModificationTime())); - listPartsResponse.addPart(part); - }); - } catch (OMException ex) { - getMetrics().updateListPartsFailureStats(startNanos); - if (ex.getResult() == ResultCodes.NO_SUCH_MULTIPART_UPLOAD_ERROR) { - throw newError(NO_SUCH_UPLOAD, uploadID, ex); - } else if (isAccessDenied(ex)) { - throw newError(S3ErrorTable.ACCESS_DENIED, - bucketName + "/" + key + "/" + uploadID, ex); - } - throw ex; - } catch (IOException | RuntimeException ex) { - getMetrics().updateListPartsFailureStats(startNanos); - throw ex; - } - long opLatencyNs = getMetrics().updateListPartsSuccessStats(startNanos); - perf.appendCount(listPartsResponse.getPartList().size()); - perf.appendOpLatencyNanos(opLatencyNs); - return Response.status(Status.OK).entity(listPartsResponse).build(); - } - @SuppressWarnings("checkstyle:ParameterNumber") void copy(OzoneVolume volume, DigestInputStream src, long srcKeyLen, String destKey, String destBucket, From d382e313d73380d5c39e1212aee4f298ad6bd758 Mon Sep 17 00:00:00 2001 From: Russole <850905junior@gmail.com> Date: Sat, 14 Feb 2026 23:27:12 +0800 Subject: [PATCH 2/5] Remove unnecessary imports --- .../java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java | 1 - 1 file changed, 1 deletion(-) diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java index 78e15fa7503..4ea59b75ff9 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java +++ 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java @@ -88,7 +88,6 @@ import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneKey; import org.apache.hadoop.ozone.client.OzoneKeyDetails; -import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; From 67d1bf15fe30bce7b4703e9bca2c9d89727e704f Mon Sep 17 00:00:00 2001 From: Russole <850905junior@gmail.com> Date: Sun, 15 Feb 2026 10:14:47 +0800 Subject: [PATCH 3/5] Fix incorrect metrics count in updateListPartsSuccessStats and getGetKeyFailure --- .../org/apache/hadoop/ozone/s3/endpoint/MultipartKeyHandler.java | 1 - .../java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultipartKeyHandler.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultipartKeyHandler.java index fe396558d61..93fde215b07 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultipartKeyHandler.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultipartKeyHandler.java @@ -64,7 +64,6 @@ Response handleGetRequest(ObjectEndpoint.ObjectRequestContext context, String ke Response response = listParts(context.getBucket(), keyPath, uploadId, partMarker, maxParts, perf); long opLatencyNs = getMetrics().updateListPartsSuccessStats(startNanos); - getMetrics().updateListPartsSuccessStats(startNanos); perf.appendOpLatencyNanos(opLatencyNs); return response; diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java index 4ea59b75ff9..f56828f5364 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java @@ -499,6 +499,7 @@ Response handleGetRequest(ObjectRequestContext context, String keyPath) return responseBuilder.build(); } catch (IOException | RuntimeException ex) { + getMetrics().updateGetKeyFailureStats(startNanos); throw ex; } } From b4facb03d55637aa591784ee633e23f0138b8b28 Mon Sep 17 00:00:00 2001 From: Russole <850905junior@gmail.com> Date: Mon, 16 Feb 2026 22:42:18 +0800 Subject: [PATCH 4/5] Update patch based on review comments --- .../s3/endpoint/MultipartKeyHandler.java | 24 ++++++------------- 1 file changed, 7 insertions(+), 17 deletions(-) diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultipartKeyHandler.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultipartKeyHandler.java index 93fde215b07..6dcb8d43628 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultipartKeyHandler.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultipartKeyHandler.java @@ -36,11 +36,12 @@ import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; import org.apache.hadoop.ozone.s3.util.S3StorageType; +import org.apache.hadoop.ozone.audit.AuditLogger.PerformanceStringBuilder; /** * Handles MPU (Multipart Upload) 
non-POST operations for object key endpoint. */ -public class MultipartKeyHandler extends ObjectOperationHandler { +class MultipartKeyHandler extends ObjectOperationHandler { @Override Response handleGetRequest(ObjectEndpoint.ObjectRequestContext context, String keyPath) @@ -56,19 +57,18 @@ Response handleGetRequest(ObjectEndpoint.ObjectRequestContext context, String ke final int maxParts = queryParams().getInt(QueryParams.MAX_PARTS, 1000); final String partNumberMarker = queryParams().get(QueryParams.PART_NUMBER_MARKER); - final long startNanos = context.getStartNanos(); final AuditLogger.PerformanceStringBuilder perf = context.getPerf(); try { int partMarker = parsePartNumberMarker(partNumberMarker); Response response = listParts(context.getBucket(), keyPath, uploadId, partMarker, maxParts, perf); - long opLatencyNs = getMetrics().updateListPartsSuccessStats(startNanos); + long opLatencyNs = getMetrics().updateListPartsSuccessStats(context.getStartNanos()); perf.appendOpLatencyNanos(opLatencyNs); return response; } catch (IOException | RuntimeException ex) { - getMetrics().updateListPartsFailureStats(startNanos); + getMetrics().updateListPartsFailureStats(context.getStartNanos()); throw ex; } } @@ -85,29 +85,19 @@ Response handleDeleteRequest(ObjectEndpoint.ObjectRequestContext context, String context.setAction(S3GAction.ABORT_MULTIPART_UPLOAD); - final long startNanos = context.getStartNanos(); try { Response r = abortMultipartUpload(context.getVolume(), context.getBucketName(), keyPath, uploadId); - getMetrics().updateAbortMultipartUploadSuccessStats(startNanos); + getMetrics().updateAbortMultipartUploadSuccessStats(context.getStartNanos()); return r; } catch (IOException | RuntimeException ex) { - getMetrics().updateAbortMultipartUploadFailureStats(startNanos); + getMetrics().updateAbortMultipartUploadFailureStats(context.getStartNanos()); throw ex; } } - /** - * Abort multipart upload request. 
- * @param bucket - * @param key - * @param uploadId - * @return Response - * @throws IOException - * @throws OS3Exception - */ private Response abortMultipartUpload(OzoneVolume volume, String bucket, String key, String uploadId) throws IOException, OS3Exception { try { @@ -137,7 +127,7 @@ private Response abortMultipartUpload(OzoneVolume volume, String bucket, */ private Response listParts(OzoneBucket ozoneBucket, String key, String uploadId, int partNumberMarker, int maxParts, - org.apache.hadoop.ozone.audit.AuditLogger.PerformanceStringBuilder perf) + PerformanceStringBuilder perf) throws IOException, OS3Exception { ListPartsResponse resp = new ListPartsResponse(); From dc8b2e3cff71e23dba86336ca8f3fb535b66a37f Mon Sep 17 00:00:00 2001 From: Russole <850905junior@gmail.com> Date: Wed, 18 Feb 2026 00:19:59 +0800 Subject: [PATCH 5/5] Fix checkstyle error --- .../apache/hadoop/ozone/s3/endpoint/MultipartKeyHandler.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultipartKeyHandler.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultipartKeyHandler.java index 6dcb8d43628..69edae42920 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultipartKeyHandler.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/MultipartKeyHandler.java @@ -27,6 +27,7 @@ import javax.ws.rs.core.Response.Status; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.AuditLogger.PerformanceStringBuilder; import org.apache.hadoop.ozone.audit.S3GAction; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts; @@ -36,7 +37,6 @@ import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; import org.apache.hadoop.ozone.s3.util.S3StorageType; -import org.apache.hadoop.ozone.audit.AuditLogger.PerformanceStringBuilder; /** * Handles MPU (Multipart Upload) non-POST operations for object key endpoint.
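
Note on the dispatch convention this series relies on: ObjectEndpoint.init() builds the request pipeline with ObjectOperationHandlerChain.newBuilder(this).add(new ObjectAclHandler()).add(new ObjectTaggingHandler()).add(new MultipartKeyHandler()).add(this).build(), and a handler signals "not mine" by returning null from handleGetRequest/handleDeleteRequest so the chain falls through to the next handler; MultipartKeyHandler defers exactly when the uploadId query parameter is absent. The following is a minimal, self-contained Java sketch of that chain-of-responsibility pattern; the types, class names, and return values are simplified placeholders for illustration, not the actual Ozone classes or signatures.

    import java.util.ArrayList;
    import java.util.List;

    // Simplified stand-in for ObjectOperationHandler: null means "not handled".
    abstract class Handler {
      abstract String handleGet(String keyPath, String uploadId);
    }

    // Stand-in for MultipartKeyHandler: only answers MPU requests.
    class MultipartHandler extends Handler {
      @Override
      String handleGet(String keyPath, String uploadId) {
        if (uploadId == null) {
          return null;                       // not MPU -> let the next handler run
        }
        return "ListParts(" + keyPath + ", " + uploadId + ")";
      }
    }

    // Stand-in for the terminal ObjectEndpoint handler: always answers.
    class DefaultHandler extends Handler {
      @Override
      String handleGet(String keyPath, String uploadId) {
        return "GetKey(" + keyPath + ")";
      }
    }

    // Stand-in for ObjectOperationHandlerChain: first non-null response wins.
    class HandlerChain extends Handler {
      private final List<Handler> handlers = new ArrayList<>();

      HandlerChain add(Handler h) {
        handlers.add(h);
        return this;
      }

      @Override
      String handleGet(String keyPath, String uploadId) {
        for (Handler h : handlers) {
          String response = h.handleGet(keyPath, uploadId);
          if (response != null) {
            return response;
          }
        }
        throw new IllegalStateException("no handler produced a response");
      }
    }

    public class ChainSketch {
      public static void main(String[] args) {
        Handler chain = new HandlerChain()
            .add(new MultipartHandler())
            .add(new DefaultHandler());
        System.out.println(chain.handleGet("key1", "upload-42")); // ListParts(key1, upload-42)
        System.out.println(chain.handleGet("key1", null));        // GetKey(key1)
      }
    }

Under this convention, registering MultipartKeyHandler ahead of the terminal handler keeps list-parts and abort-multipart-upload dispatch out of the plain GET/DELETE paths, and a handler that opts out costs only a null check.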