 import org.opensearch.repositories.s3.async.UploadRequest;
 import org.opensearch.repositories.s3.utils.HttpRangeUtils;

+import java.io.BufferedInputStream;
 import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.io.InputStream;
@@ -188,10 +189,38 @@ public void asyncBlobUpload(WriteContext writeContext, ActionListener<Void> comp
             writeContext.getWritePriority(),
             writeContext.getUploadFinalizer(),
             writeContext.doRemoteDataIntegrityCheck(),
-            writeContext.getExpectedChecksum()
+            writeContext.getExpectedChecksum(),
+            blobStore.isUploadRetryEnabled()
         );
         try {
-            long partSize = blobStore.getAsyncTransferManager().calculateOptimalPartSize(writeContext.getFileSize());
+            if (uploadRequest.getContentLength() > ByteSizeUnit.GB.toBytes(10) && blobStore.isRedirectLargeUploads()) {
+                StreamContext streamContext = SocketAccess.doPrivileged(
+                    () -> writeContext.getStreamProvider(uploadRequest.getContentLength())
+                );
+                InputStreamContainer inputStream = streamContext.provideStream(0);
+                try {
+                    executeMultipartUpload(
+                        blobStore,
+                        uploadRequest.getKey(),
+                        inputStream.getInputStream(),
+                        uploadRequest.getContentLength()
+                    );
+                    completionListener.onResponse(null);
+                } catch (Exception ex) {
+                    logger.error(
+                        () -> new ParameterizedMessage(
+                            "Failed to upload large file {} of size {} ",
+                            uploadRequest.getKey(),
+                            uploadRequest.getContentLength()
+                        ),
+                        ex
+                    );
+                    completionListener.onFailure(ex);
+                }
+                return;
+            }
+            long partSize = blobStore.getAsyncTransferManager()
+                .calculateOptimalPartSize(writeContext.getFileSize(), writeContext.getWritePriority(), blobStore.isUploadRetryEnabled());
             StreamContext streamContext = SocketAccess.doPrivileged(() -> writeContext.getStreamProvider(partSize));
             try (AmazonAsyncS3Reference amazonS3Reference = SocketAccess.doPrivileged(blobStore::asyncClientReference)) {
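Context for the hunk above: when the repository opts in via the redirect setting, any upload larger than 10 GB now bypasses the async transfer manager and is funneled through the blocking `executeMultipartUpload` path; otherwise the optimal part size is computed with the write priority and the retry flag taken into account. Below is a minimal standalone sketch of just the threshold decision, with a hypothetical helper name and the 10 GB constant written out (the diff expresses it as `ByteSizeUnit.GB.toBytes(10)`):

```java
// Illustrative sketch only, not the plugin's code.
public class LargeUploadRedirectSketch {
    // 10 GiB, matching ByteSizeUnit.GB.toBytes(10) in the hunk above
    static final long REDIRECT_THRESHOLD_BYTES = 10L * 1024 * 1024 * 1024;

    // Redirect to the blocking multipart path only when the repository enables it
    // and the blob is above the threshold.
    static boolean shouldRedirect(long contentLength, boolean redirectLargeUploads) {
        return redirectLargeUploads && contentLength > REDIRECT_THRESHOLD_BYTES;
    }

    public static void main(String[] args) {
        System.out.println(shouldRedirect(11L * 1024 * 1024 * 1024, true));  // true  -> multipart path
        System.out.println(shouldRedirect(11L * 1024 * 1024 * 1024, false)); // false -> async path
        System.out.println(shouldRedirect(512L * 1024 * 1024, true));        // false -> async path
    }
}
```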
@@ -537,8 +566,14 @@ void executeSingleUpload(final S3BlobStore blobStore, final String blobName, fin

         PutObjectRequest putObjectRequest = putObjectRequestBuilder.build();
         try (AmazonS3Reference clientReference = blobStore.clientReference()) {
+            final InputStream requestInputStream;
+            if (blobStore.isUploadRetryEnabled()) {
+                requestInputStream = new BufferedInputStream(input, (int) (blobSize + 1));
+            } else {
+                requestInputStream = input;
+            }
             SocketAccess.doPrivilegedVoid(
-                () -> clientReference.get().putObject(putObjectRequest, RequestBody.fromInputStream(input, blobSize))
+                () -> clientReference.get().putObject(putObjectRequest, RequestBody.fromInputStream(requestInputStream, blobSize))
             );
         } catch (final SdkException e) {
             throw new IOException("Unable to upload object [" + blobName + "] using a single upload", e);
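Why the buffered wrapper in this hunk: `RequestBody.fromInputStream` hands the raw stream to the SDK, and once a failed attempt has consumed bytes from a plain stream there is nothing left to resend. Wrapping it in a `BufferedInputStream` sized `blobSize + 1` keeps the whole payload buffered so it can be rewound for a retry, presumably via the usual mark/reset contract. A minimal standalone sketch of that contract, with illustrative names only:

```java
import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.util.Arrays;

// Illustrative sketch only: a BufferedInputStream whose buffer covers the whole
// payload can be mark()ed once and reset() after a failed attempt, so a retry
// re-reads the exact same bytes instead of hitting a drained stream.
public class RetryableStreamSketch {
    public static void main(String[] args) throws IOException {
        byte[] payload = "example blob contents".getBytes();

        // Buffer sized payload + 1, mirroring the (blobSize + 1) sizing in the diff above.
        BufferedInputStream retryable = new BufferedInputStream(new ByteArrayInputStream(payload), payload.length + 1);
        retryable.mark(payload.length + 1);

        byte[] firstAttempt = retryable.readNBytes(payload.length);  // simulate a failed upload attempt
        retryable.reset();                                           // rewind for the retry
        byte[] secondAttempt = retryable.readNBytes(payload.length); // the same bytes are available again

        System.out.println(Arrays.equals(firstAttempt, secondAttempt)); // prints: true
    }
}
```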
@@ -578,6 +613,13 @@ void executeMultipartUpload(final S3BlobStore blobStore, final String blobName,
             createMultipartUploadRequestBuilder.serverSideEncryption(ServerSideEncryption.AES256);
         }

+        final InputStream requestInputStream;
+        if (blobStore.isUploadRetryEnabled()) {
+            requestInputStream = new BufferedInputStream(input, (int) (partSize + 1));
+        } else {
+            requestInputStream = input;
+        }
+
         CreateMultipartUploadRequest createMultipartUploadRequest = createMultipartUploadRequestBuilder.build();
         try (AmazonS3Reference clientReference = blobStore.clientReference()) {
             uploadId.set(
@@ -601,10 +643,9 @@ void executeMultipartUpload(final S3BlobStore blobStore, final String blobName,
                 .build();

             bytesCount += uploadPartRequest.contentLength();
-
             final UploadPartResponse uploadResponse = SocketAccess.doPrivileged(
                 () -> clientReference.get()
-                    .uploadPart(uploadPartRequest, RequestBody.fromInputStream(input, uploadPartRequest.contentLength()))
+                    .uploadPart(uploadPartRequest, RequestBody.fromInputStream(requestInputStream, uploadPartRequest.contentLength()))
             );
             parts.add(CompletedPart.builder().partNumber(uploadPartRequest.partNumber()).eTag(uploadResponse.eTag()).build());
         }
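In the multipart path the same buffered stream is reused for every part in the loop above, which is why the buffer is sized `partSize + 1` rather than the full blob size. As a side note, the per-part bookkeeping the loop relies on is plain ceiling division; an illustrative sketch of that arithmetic (not taken from the plugin) follows:

```java
// Illustrative sketch only: given a blob size and a fixed part size, the upload
// produces ceil(blobSize / partSize) parts, with the final part carrying the remainder.
public class MultipartMathSketch {
    static long partCount(long blobSize, long partSize) {
        return (blobSize + partSize - 1) / partSize; // ceiling division
    }

    static long lastPartSize(long blobSize, long partSize) {
        long remainder = blobSize % partSize;
        return remainder == 0 ? partSize : remainder;
    }

    public static void main(String[] args) {
        long blobSize = 26L * 1024 * 1024; // 26 MB blob
        long partSize = 10L * 1024 * 1024; // 10 MB parts
        System.out.println(partCount(blobSize, partSize));    // 3
        System.out.println(lastPartSize(blobSize, partSize)); // 6291456 (6 MB)
    }
}
```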