|
20 | 20 |
|
21 | 21 | import static org.apache.accumulo.core.Constants.IMPORT_MAPPINGS_FILE;
|
22 | 22 | import static org.apache.accumulo.core.util.LazySingletons.RANDOM;
|
| 23 | +import static org.apache.accumulo.test.TableOperationsIT.setExpectedTabletAvailability; |
| 24 | +import static org.apache.accumulo.test.TableOperationsIT.verifyTabletAvailabilites; |
23 | 25 | import static org.junit.jupiter.api.Assertions.assertEquals;
|
24 | 26 | import static org.junit.jupiter.api.Assertions.assertFalse;
|
25 | 27 | import static org.junit.jupiter.api.Assertions.assertNotNull;
|
|
33 | 35 | import java.io.InputStreamReader;
|
34 | 36 | import java.nio.file.Paths;
|
35 | 37 | import java.time.Duration;
|
| 38 | +import java.util.ArrayList; |
36 | 39 | import java.util.Arrays;
|
37 | 40 | import java.util.Iterator;
|
38 | 41 | import java.util.List;
|
39 | 42 | import java.util.Map.Entry;
|
40 | 43 | import java.util.Set;
|
| 44 | +import java.util.SortedSet; |
41 | 45 |
|
42 | 46 | import org.apache.accumulo.cluster.AccumuloCluster;
|
43 | 47 | import org.apache.accumulo.core.Constants;
|
44 | 48 | import org.apache.accumulo.core.client.Accumulo;
|
45 | 49 | import org.apache.accumulo.core.client.AccumuloClient;
|
46 | 50 | import org.apache.accumulo.core.client.BatchWriter;
|
47 | 51 | import org.apache.accumulo.core.client.Scanner;
|
| 52 | +import org.apache.accumulo.core.client.admin.AvailabilityForTablet; |
48 | 53 | import org.apache.accumulo.core.client.admin.CompactionConfig;
|
49 | 54 | import org.apache.accumulo.core.client.admin.ImportConfiguration;
|
| 55 | +import org.apache.accumulo.core.client.admin.TabletAvailability; |
50 | 56 | import org.apache.accumulo.core.data.Key;
|
51 | 57 | import org.apache.accumulo.core.data.Mutation;
|
52 | 58 | import org.apache.accumulo.core.data.Range;
|
|
75 | 81 | import org.slf4j.Logger;
|
76 | 82 | import org.slf4j.LoggerFactory;
|
77 | 83 |
|
| 84 | +import com.google.common.collect.Sets; |
| 85 | + |
78 | 86 | /**
|
79 | 87 | * ImportTable didn't correctly place absolute paths in metadata. This resulted in the imported
|
80 | 88 | * table only being usable when the actual HDFS directory for Accumulo was the same as
|
@@ -367,6 +375,144 @@ public void testExportImportOffline(boolean fenced) throws Exception {
|
367 | 375 | }
|
368 | 376 | }
|
369 | 377 |
|
| 378 | + /** |
| 379 | + * Ensure all tablets in an imported table are ONDEMAND. |
| 380 | + * |
| 381 | + * Create a table with multiple tablets, each with a different tablet availability. Export |
| 382 | + * the table, then import it and verify that every tablet in the imported table has the |
| 383 | + * ONDEMAND tablet availability. |
| 384 | + * |
| 385 | + * This test case stitches together code from TableOperationsIT to create the table, set up |
| 386 | + * the tablet availabilities, and verify them. The code that exports and then imports the |
| 387 | + * table is adapted from ImportExportIT.testExportImportOffline(). |
| 388 | + */ |
| 389 | + @Test |
| 390 | + public void testImportedTableIsOnDemand() throws Exception { |
| 391 | + |
| 392 | + try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) { |
| 393 | + String[] tableNames = getUniqueNames(2); |
| 394 | + String srcTable = tableNames[0], destTable = tableNames[1]; |
| 395 | + |
| 396 | + client.tableOperations().create(srcTable); |
| 397 | + String srcTableId = client.tableOperations().tableIdMap().get(srcTable); |
| 398 | + |
| 399 | + // Add splits 'h' and 'q'. Leave the first tablet ONDEMAND (the default), set the second to UNHOSTED and the third to HOSTED |
| 400 | + SortedSet<Text> splits = Sets.newTreeSet(Arrays.asList(new Text("h"), new Text("q"))); |
| 401 | + client.tableOperations().addSplits(srcTable, splits); |
| 402 | + Range range = new Range(new Text("h"), false, new Text("q"), true); |
| 403 | + client.tableOperations().setTabletAvailability(srcTable, range, TabletAvailability.UNHOSTED); |
| 404 | + range = new Range(new Text("q"), false, null, true); |
| 405 | + client.tableOperations().setTabletAvailability(srcTable, range, TabletAvailability.HOSTED); |
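| | + // tablet layout is now: (-inf,h] ONDEMAND, (h,q] UNHOSTED, (q,+inf) HOSTED |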
| 406 | + |
| 407 | + // verify |
| 408 | + List<AvailabilityForTablet> expectedTabletAvailability = new ArrayList<>(); |
| 409 | + setExpectedTabletAvailability(expectedTabletAvailability, srcTableId, "h", null, |
| 410 | + TabletAvailability.ONDEMAND); |
| 411 | + setExpectedTabletAvailability(expectedTabletAvailability, srcTableId, "q", "h", |
| 412 | + TabletAvailability.UNHOSTED); |
| 413 | + setExpectedTabletAvailability(expectedTabletAvailability, srcTableId, null, "q", |
| 414 | + TabletAvailability.HOSTED); |
| 415 | + verifyTabletAvailabilites(client, srcTable, new Range(), expectedTabletAvailability); |
| 416 | + |
| 417 | + // Add a split within each of the three existing tablets: 'd', 'm', and 'v' |
| 418 | + splits = Sets.newTreeSet(Arrays.asList(new Text("d"), new Text("m"), new Text("v"))); |
| 419 | + client.tableOperations().addSplits(srcTable, splits); |
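| | + // tablets created by a split inherit the availability of the parent tablet; the expected |
| | + // values below assert that inheritance |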
| 420 | + |
| 421 | + // verify results |
| 422 | + expectedTabletAvailability.clear(); |
| 423 | + setExpectedTabletAvailability(expectedTabletAvailability, srcTableId, "d", null, |
| 424 | + TabletAvailability.ONDEMAND); |
| 425 | + setExpectedTabletAvailability(expectedTabletAvailability, srcTableId, "h", "d", |
| 426 | + TabletAvailability.ONDEMAND); |
| 427 | + setExpectedTabletAvailability(expectedTabletAvailability, srcTableId, "m", "h", |
| 428 | + TabletAvailability.UNHOSTED); |
| 429 | + setExpectedTabletAvailability(expectedTabletAvailability, srcTableId, "q", "m", |
| 430 | + TabletAvailability.UNHOSTED); |
| 431 | + setExpectedTabletAvailability(expectedTabletAvailability, srcTableId, "v", "q", |
| 432 | + TabletAvailability.HOSTED); |
| 433 | + setExpectedTabletAvailability(expectedTabletAvailability, srcTableId, null, "v", |
| 434 | + TabletAvailability.HOSTED); |
| 435 | + verifyTabletAvailabilites(client, srcTable, new Range(), expectedTabletAvailability); |
| 436 | + |
| 437 | + // Make a directory to hold the export and import directories. It must exist on the |
| 438 | + // filesystem the cluster is running on. |
| 439 | + FileSystem fs = cluster.getFileSystem(); |
| 440 | + log.info("Using FileSystem: " + fs); |
| 441 | + Path baseDir = new Path(cluster.getTemporaryPath(), getClass().getName()); |
| 442 | + fs.deleteOnExit(baseDir); |
| 443 | + if (fs.exists(baseDir)) { |
| 444 | + log.info("{} exists on filesystem, deleting", baseDir); |
| 445 | + assertTrue(fs.delete(baseDir, true), "Failed to delete " + baseDir); |
| 446 | + } |
| 447 | + log.info("Creating {}", baseDir); |
| 448 | + assertTrue(fs.mkdirs(baseDir), "Failed to create " + baseDir); |
| 449 | + Path exportDir = new Path(baseDir, "export"); |
| 450 | + fs.deleteOnExit(exportDir); |
| 451 | + Path importDirA = new Path(baseDir, "import-a"); |
| 452 | + Path importDirB = new Path(baseDir, "import-b"); |
| 453 | + fs.deleteOnExit(importDirA); |
| 454 | + fs.deleteOnExit(importDirB); |
| 455 | + for (Path p : new Path[] {exportDir, importDirA, importDirB}) { |
| 456 | + assertTrue(fs.mkdirs(p), "Failed to create " + p); |
| 457 | + } |
| 458 | + |
| 459 | + Set<String> importDirs = Set.of(importDirA.toString(), importDirB.toString()); |
| 460 | + |
| 461 | + Path[] importDirAry = new Path[] {importDirA, importDirB}; |
| 462 | + |
| 463 | + log.info("Exporting table to {}", exportDir); |
| 464 | + log.info("Importing table from {}", importDirs); |
| 465 | + |
| 466 | + // exportTable should fail fast while the table is still online |
| 467 | + assertThrows(IllegalStateException.class, |
| 468 | + () -> client.tableOperations().exportTable(srcTable, exportDir.toString())); |
| 469 | + |
| 470 | + // Offline the table |
| 471 | + client.tableOperations().offline(srcTable, true); |
| 472 | + // Then export it |
| 473 | + client.tableOperations().exportTable(srcTable, exportDir.toString()); |
| 474 | + |
| 475 | + // Make sure the distcp.txt file that exportTable creates is available |
| 476 | + Path distcp = new Path(exportDir, "distcp.txt"); |
| 477 | + fs.deleteOnExit(distcp); |
| 478 | + assertTrue(fs.exists(distcp), "Distcp file doesn't exist"); |
| 479 | + FSDataInputStream is = fs.open(distcp); |
| 480 | + BufferedReader reader = new BufferedReader(new InputStreamReader(is)); |
| 481 | + |
| 482 | + // Copy each exported file into one of the two import directories, chosen at random |
| 483 | + String line; |
| 484 | + |
| 485 | + while ((line = reader.readLine()) != null) { |
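| | + // each line is a fully-qualified URI; assuming a five-character scheme prefix such as |
| | + // "hdfs:", substring(5) leaves a path the test FileSystem can resolve |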
| 486 | + Path p = new Path(line.substring(5)); |
| 487 | + assertTrue(fs.exists(p), "File doesn't exist: " + p); |
| 488 | + Path importDir = importDirAry[RANDOM.get().nextInt(importDirAry.length)]; |
| 489 | + Path dest = new Path(importDir, p.getName()); |
| 490 | + assertFalse(fs.exists(dest), "Did not expect " + dest + " to exist"); |
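| | + // deleteSource=false: copy the exported file, leaving the original in place |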
| 491 | + FileUtil.copy(fs, p, fs, dest, false, fs.getConf()); |
| 492 | + } |
| 493 | + |
| 494 | + reader.close(); |
| 495 | + |
| 496 | + log.info("Import dir A: {}", Arrays.toString(fs.listStatus(importDirA))); |
| 497 | + log.info("Import dir B: {}", Arrays.toString(fs.listStatus(importDirB))); |
| 498 | + |
| 499 | + // Import the exported data into a new table |
| 500 | + client.tableOperations().importTable(destTable, importDirs, ImportConfiguration.empty()); |
| 501 | + |
| 502 | + // Get the table ID for the table that the importtable command created |
| 503 | + final String destTableId = client.tableOperations().tableIdMap().get(destTable); |
| 504 | + assertNotNull(destTableId); |
| 505 | + |
| 506 | + log.info("Imported into table with ID: {}", destTableId); |
| 507 | + |
| 508 | + // Verify every tablet in the imported table has the default ONDEMAND availability |
| 509 | + client.tableOperations().getTabletInformation(destTable, new Range()) |
| 510 | + .forEach(tabletInformation -> assertEquals(TabletAvailability.ONDEMAND, |
| 511 | + tabletInformation.getTabletAvailability(), |
| 512 | + "Expected all tablets in imported table to be ONDEMAND")); |
| 513 | + } |
| 514 | + } |
| 515 | + |
370 | 516 | private boolean verifyMappingsFile(String destTableId) throws IOException {
|
371 | 517 | AccumuloCluster cluster = getCluster();
|
372 | 518 | assertTrue(cluster instanceof MiniAccumuloClusterImpl);
|