 import software.amazon.awssdk.services.s3.model.UploadPartRequest;
 import software.amazon.awssdk.services.s3.model.UploadPartResponse;
 import software.amazon.awssdk.services.s3.paginators.ListObjectsV2Iterable;
+import software.amazon.awssdk.utils.CollectionUtils;
 
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
@@ -77,6 +78,7 @@
 import org.opensearch.common.blobstore.BlobPath;
 import org.opensearch.common.blobstore.BlobStoreException;
 import org.opensearch.common.blobstore.DeleteResult;
+import org.opensearch.common.blobstore.FetchBlobResult;
 import org.opensearch.common.blobstore.stream.read.ReadContext;
 import org.opensearch.common.blobstore.stream.write.WriteContext;
 import org.opensearch.common.blobstore.stream.write.WritePriority;
@@ -138,6 +140,13 @@ public boolean blobExists(String blobName) {
         }
     }
 
+    @ExperimentalApi
+    @Override
+    public FetchBlobResult readBlobWithMetadata(String blobName) throws IOException {
+        S3RetryingInputStream s3RetryingInputStream = new S3RetryingInputStream(blobStore, buildKey(blobName));
+        return new FetchBlobResult(s3RetryingInputStream, s3RetryingInputStream.getMetadata());
+    }
+
     @Override
     public InputStream readBlob(String blobName) throws IOException {
         return new S3RetryingInputStream(blobStore, buildKey(blobName));
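Note: a minimal caller-side sketch of the new read path, for context. The container variable and blob name are placeholders, and it assumes FetchBlobResult exposes the wrapped stream and metadata map via getInputStream() and getMetadata().

    // Illustrative only: read a blob together with its user-defined object metadata.
    FetchBlobResult result = blobContainer.readBlobWithMetadata("segments_1");
    try (InputStream stream = result.getInputStream()) {
        Map<String, String> metadata = result.getMetadata(); // user metadata stored with the object
        // ... consume the stream ...
    }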
@@ -169,12 +178,27 @@ public long readBlobPreferredLength() {
      */
     @Override
     public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException {
+        writeBlobWithMetadata(blobName, inputStream, blobSize, failIfAlreadyExists, null);
+    }
+
+    /**
+     * Write blob with its object metadata.
+     */
+    @ExperimentalApi
+    @Override
+    public void writeBlobWithMetadata(
+        String blobName,
+        InputStream inputStream,
+        long blobSize,
+        boolean failIfAlreadyExists,
+        @Nullable Map<String, String> metadata
+    ) throws IOException {
         assert inputStream.markSupported() : "No mark support on inputStream breaks the S3 SDK's ability to retry requests";
         SocketAccess.doPrivilegedIOException(() -> {
             if (blobSize <= getLargeBlobThresholdInBytes()) {
-                executeSingleUpload(blobStore, buildKey(blobName), inputStream, blobSize);
+                executeSingleUpload(blobStore, buildKey(blobName), inputStream, blobSize, metadata);
             } else {
-                executeMultipartUpload(blobStore, buildKey(blobName), inputStream, blobSize);
+                executeMultipartUpload(blobStore, buildKey(blobName), inputStream, blobSize, metadata);
             }
             return null;
         });
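Note: a minimal caller-side sketch of the metadata-aware write path, for context. The container variable, path, and map contents are placeholders; the input is buffered because the assertion above requires mark/reset support for SDK retries.

    // Illustrative only: attach custom key/value metadata to an upload.
    Map<String, String> metadata = Map.of("purpose", "snapshot");
    try (InputStream input = new BufferedInputStream(Files.newInputStream(path))) {
        blobContainer.writeBlobWithMetadata("segments_1", input, Files.size(path), true, metadata);
    }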
@@ -190,7 +214,8 @@ public void asyncBlobUpload(WriteContext writeContext, ActionListener<Void> comp
             writeContext.getUploadFinalizer(),
             writeContext.doRemoteDataIntegrityCheck(),
             writeContext.getExpectedChecksum(),
-            blobStore.isUploadRetryEnabled()
+            blobStore.isUploadRetryEnabled(),
+            writeContext.getMetadata()
         );
         try {
             if (uploadRequest.getContentLength() > ByteSizeUnit.GB.toBytes(10) && blobStore.isRedirectLargeUploads()) {
@@ -203,7 +228,8 @@ public void asyncBlobUpload(WriteContext writeContext, ActionListener<Void> comp
                         blobStore,
                         uploadRequest.getKey(),
                         inputStream.getInputStream(),
-                        uploadRequest.getContentLength()
+                        uploadRequest.getContentLength(),
+                        uploadRequest.getMetadata()
                     );
                     completionListener.onResponse(null);
                 } catch (Exception ex) {
@@ -542,8 +568,13 @@ private String buildKey(String blobName) {
     /**
      * Uploads a blob using a single upload request
      */
-    void executeSingleUpload(final S3BlobStore blobStore, final String blobName, final InputStream input, final long blobSize)
-        throws IOException {
+    void executeSingleUpload(
+        final S3BlobStore blobStore,
+        final String blobName,
+        final InputStream input,
+        final long blobSize,
+        final Map<String, String> metadata
+    ) throws IOException {
 
         // Extra safety checks
         if (blobSize > MAX_FILE_SIZE.getBytes()) {
@@ -560,6 +591,10 @@ void executeSingleUpload(final S3BlobStore blobStore, final String blobName, fin
             .storageClass(blobStore.getStorageClass())
             .acl(blobStore.getCannedACL())
             .overrideConfiguration(o -> o.addMetricPublisher(blobStore.getStatsMetricPublisher().putObjectMetricPublisher));
+
+        if (CollectionUtils.isNotEmpty(metadata)) {
+            putObjectRequestBuilder = putObjectRequestBuilder.metadata(metadata);
+        }
         if (blobStore.serverSideEncryption()) {
             putObjectRequestBuilder.serverSideEncryption(ServerSideEncryption.AES256);
         }
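Note: user metadata passed to PutObjectRequest.Builder#metadata is sent as x-amz-meta-* headers and stored with the object. A standalone AWS SDK v2 sketch, independent of the plugin code above (client, bucket, and key are placeholders):

    // Illustrative only: single-part upload with user metadata via the SDK.
    PutObjectRequest request = PutObjectRequest.builder()
        .bucket("my-bucket")
        .key("my-key")
        .metadata(Map.of("purpose", "snapshot")) // sent as the x-amz-meta-purpose header
        .build();
    s3Client.putObject(request, RequestBody.fromString("payload"));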
@@ -583,8 +618,13 @@ void executeSingleUpload(final S3BlobStore blobStore, final String blobName, fin
     /**
      * Uploads a blob using multipart upload requests.
      */
-    void executeMultipartUpload(final S3BlobStore blobStore, final String blobName, final InputStream input, final long blobSize)
-        throws IOException {
+    void executeMultipartUpload(
+        final S3BlobStore blobStore,
+        final String blobName,
+        final InputStream input,
+        final long blobSize,
+        final Map<String, String> metadata
+    ) throws IOException {
 
         ensureMultiPartUploadSize(blobSize);
         final long partSize = blobStore.bufferSizeInBytes();
@@ -609,6 +649,10 @@ void executeMultipartUpload(final S3BlobStore blobStore, final String blobName,
             .acl(blobStore.getCannedACL())
             .overrideConfiguration(o -> o.addMetricPublisher(blobStore.getStatsMetricPublisher().multipartUploadMetricCollector));
 
+        if (CollectionUtils.isNotEmpty(metadata)) {
+            createMultipartUploadRequestBuilder.metadata(metadata);
+        }
+
         if (blobStore.serverSideEncryption()) {
             createMultipartUploadRequestBuilder.serverSideEncryption(ServerSideEncryption.AES256);
         }
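Note: metadata supplied at CreateMultipartUpload time is applied to the completed object, so it can be read back the same way as for a single-part upload. A standalone AWS SDK v2 sketch to verify it without downloading the object (client, bucket, and key are placeholders):

    // Illustrative only: read back user metadata with a HEAD request.
    HeadObjectResponse head = s3Client.headObject(
        HeadObjectRequest.builder().bucket("my-bucket").key("my-key").build()
    );
    Map<String, String> userMetadata = head.metadata(); // keys are returned without the x-amz-meta- prefix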