Skip to content

Commit 121758a

Browse files
authored
Convert several ITs to use SharedMiniClusterBase (#5352)
This change migrates several ITs to using SharedMiniClusterBase so that a single cluster is stood up and reused for all the tests in the test class. This has the benefit of speeding up the tests because the entire cluster does not need to be torn down and recreated for each test.
1 parent e8d75df commit 121758a

14 files changed

+323
-143
lines changed

test/src/main/java/org/apache/accumulo/test/AdminCheckIT.java

+18-5
Original file line numberDiff line numberDiff line change
@@ -40,22 +40,35 @@
4040
import org.apache.accumulo.core.client.AccumuloClient;
4141
import org.apache.accumulo.core.client.IteratorSetting;
4242
import org.apache.accumulo.core.client.admin.CompactionConfig;
43+
import org.apache.accumulo.harness.SharedMiniClusterBase;
4344
import org.apache.accumulo.server.ServerContext;
4445
import org.apache.accumulo.server.cli.ServerUtilOpts;
4546
import org.apache.accumulo.server.util.Admin;
4647
import org.apache.accumulo.server.util.checkCommand.CheckRunner;
47-
import org.apache.accumulo.test.functional.ConfigurableMacBase;
4848
import org.apache.accumulo.test.functional.ReadWriteIT;
4949
import org.apache.accumulo.test.functional.SlowIterator;
5050
import org.easymock.EasyMock;
5151
import org.easymock.IAnswer;
52+
import org.junit.jupiter.api.AfterAll;
5253
import org.junit.jupiter.api.AfterEach;
54+
import org.junit.jupiter.api.BeforeAll;
5355
import org.junit.jupiter.api.Test;
5456

5557
import com.beust.jcommander.JCommander;
5658
import com.google.common.collect.Sets;
5759

58-
public class AdminCheckIT extends ConfigurableMacBase {
60+
public class AdminCheckIT extends SharedMiniClusterBase {
61+
62+
@BeforeAll
63+
public static void setup() throws Exception {
64+
SharedMiniClusterBase.startMiniCluster();
65+
}
66+
67+
@AfterAll
68+
public static void teardown() {
69+
SharedMiniClusterBase.stopMiniCluster();
70+
}
71+
5972
private static final PrintStream ORIGINAL_OUT = System.out;
6073

6174
@AfterEach
@@ -308,7 +321,7 @@ public void testPassingTableLocksCheck() throws Exception {
308321
String table = getUniqueNames(1)[0];
309322
Admin.CheckCommand.Check tableLocksCheck = Admin.CheckCommand.Check.TABLE_LOCKS;
310323

311-
try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build()) {
324+
try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
312325
client.tableOperations().create(table);
313326

314327
ReadWriteIT.ingest(client, 10, 10, 10, 0, table);
@@ -400,7 +413,7 @@ public void testPassingUserFilesCheck() throws Exception {
400413
// Tests the USER_FILES check in the case where it should pass
401414
Admin.CheckCommand.Check userFilesCheck = Admin.CheckCommand.Check.USER_FILES;
402415

403-
try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build()) {
416+
try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
404417
// create a table, insert some data, and flush so there's a file to check
405418
String table = getUniqueNames(1)[0];
406419
client.tableOperations().create(table);
@@ -447,7 +460,7 @@ private Admin createMockAdmin(boolean[] checksPass) {
447460
Admin.CheckCommand dummyCheckCommand = new DummyCheckCommand(checksPass);
448461
cl.addCommand("check", dummyCheckCommand);
449462
cl.parse(args);
450-
Admin.executeCheckCommand(getServerContext(), dummyCheckCommand, opts);
463+
Admin.executeCheckCommand(getCluster().getServerContext(), dummyCheckCommand, opts);
451464
return null;
452465
});
453466
EasyMock.replay(admin);

test/src/main/java/org/apache/accumulo/test/CloneIT.java

+27-9
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@
2424

2525
import java.util.HashSet;
2626
import java.util.Map.Entry;
27+
import java.util.UUID;
2728
import java.util.stream.Stream;
2829

2930
import org.apache.accumulo.core.client.Accumulo;
@@ -43,23 +44,35 @@
4344
import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.TabletColumnFamily;
4445
import org.apache.accumulo.core.metadata.schema.TabletDeletedException;
4546
import org.apache.accumulo.core.security.Authorizations;
46-
import org.apache.accumulo.harness.AccumuloClusterHarness;
47+
import org.apache.accumulo.harness.SharedMiniClusterBase;
4748
import org.apache.accumulo.server.util.MetadataTableUtil;
4849
import org.apache.hadoop.fs.Path;
4950
import org.apache.hadoop.io.Text;
51+
import org.junit.jupiter.api.AfterAll;
52+
import org.junit.jupiter.api.BeforeAll;
5053
import org.junit.jupiter.api.Test;
5154
import org.junit.jupiter.api.extension.ExtensionContext;
5255
import org.junit.jupiter.params.ParameterizedTest;
5356
import org.junit.jupiter.params.provider.Arguments;
5457
import org.junit.jupiter.params.provider.ArgumentsProvider;
5558
import org.junit.jupiter.params.provider.ArgumentsSource;
5659

57-
public class CloneIT extends AccumuloClusterHarness {
60+
public class CloneIT extends SharedMiniClusterBase {
61+
62+
@BeforeAll
63+
public static void setup() throws Exception {
64+
SharedMiniClusterBase.startMiniCluster();
65+
}
66+
67+
@AfterAll
68+
public static void teardown() {
69+
SharedMiniClusterBase.stopMiniCluster();
70+
}
5871

5972
@Test
6073
public void testNoFiles() throws Exception {
6174
try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
62-
String tableName = getUniqueNames(1)[0];
75+
String tableName = generateTableName();
6376
client.tableOperations().create(tableName);
6477

6578
KeyExtent ke = new KeyExtent(TableId.of("0"), null, null);
@@ -88,7 +101,7 @@ public void testNoFiles() throws Exception {
88101
public void testFilesChange(Range range1, Range range2) throws Exception {
89102
String filePrefix = "hdfs://nn:8000/accumulo/tables/0";
90103
try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
91-
String tableName = getUniqueNames(1)[0];
104+
String tableName = generateTableName();
92105
client.tableOperations().create(tableName);
93106

94107
KeyExtent ke = new KeyExtent(TableId.of("0"), null, null);
@@ -150,7 +163,7 @@ public void testSplit1(Range range) throws Exception {
150163
String filePrefix = "hdfs://nn:8000/accumulo/tables/0";
151164

152165
try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
153-
String tableName = getUniqueNames(1)[0];
166+
String tableName = generateTableName();
154167
client.tableOperations().create(tableName);
155168

156169
try (BatchWriter bw1 = client.createBatchWriter(tableName);
@@ -198,7 +211,7 @@ public void testSplit1(Range range) throws Exception {
198211
public void testSplit2(Range range) throws Exception {
199212
String filePrefix = "hdfs://nn:8000/accumulo/tables/0";
200213
try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
201-
String tableName = getUniqueNames(1)[0];
214+
String tableName = generateTableName();
202215
client.tableOperations().create(tableName);
203216

204217
try (BatchWriter bw1 = client.createBatchWriter(tableName);
@@ -280,7 +293,7 @@ private static Mutation createTablet(String tid, String endRow, String prevRow,
280293
public void testSplit3(Range range1, Range range2, Range range3) throws Exception {
281294
String filePrefix = "hdfs://nn:8000/accumulo/tables/0";
282295
try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
283-
String tableName = getUniqueNames(1)[0];
296+
String tableName = generateTableName();
284297
client.tableOperations().create(tableName);
285298

286299
try (BatchWriter bw1 = client.createBatchWriter(tableName);
@@ -329,7 +342,7 @@ public void testSplit3(Range range1, Range range2, Range range3) throws Exceptio
329342
@ArgumentsSource(RangeArgumentsProvider.class)
330343
public void testClonedMarker(Range range1, Range range2, Range range3) throws Exception {
331344
try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
332-
String tableName = getUniqueNames(1)[0];
345+
String tableName = generateTableName();
333346
client.tableOperations().create(tableName);
334347
String filePrefix = "hdfs://nn:8000/accumulo/tables/0";
335348

@@ -400,7 +413,7 @@ public void testClonedMarker(Range range1, Range range2, Range range3) throws Ex
400413
public void testMerge(Range range1, Range range2) throws Exception {
401414
String filePrefix = "hdfs://nn:8000/accumulo/tables/0";
402415
try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
403-
String tableName = getUniqueNames(1)[0];
416+
String tableName = generateTableName();
404417
client.tableOperations().create(tableName);
405418

406419
try (BatchWriter bw1 = client.createBatchWriter(tableName);
@@ -443,4 +456,9 @@ public Stream<? extends Arguments> provideArguments(ExtensionContext context) {
443456
new Range("row_0", false, "row_1", true), new Range()));
444457
}
445458
}
459+
460+
// Append random text because parameterized tests repeat the same test name
461+
private String generateTableName() {
462+
return getUniqueNames(1)[0] + UUID.randomUUID().toString().substring(0, 8);
463+
}
446464
}

test/src/main/java/org/apache/accumulo/test/LargeSplitRowIT.java

+42-30
Original file line numberDiff line numberDiff line change
@@ -49,31 +49,38 @@
4949
import org.apache.accumulo.core.dataImpl.KeyExtent;
5050
import org.apache.accumulo.core.metadata.schema.TabletMetadata;
5151
import org.apache.accumulo.core.security.Authorizations;
52-
import org.apache.accumulo.miniclusterImpl.MiniAccumuloConfigImpl;
52+
import org.apache.accumulo.harness.SharedMiniClusterBase;
5353
import org.apache.accumulo.server.split.SplitUtils;
5454
import org.apache.accumulo.test.fate.ManagerRepoIT;
55-
import org.apache.accumulo.test.functional.ConfigurableMacBase;
5655
import org.apache.accumulo.test.util.Wait;
57-
import org.apache.hadoop.conf.Configuration;
5856
import org.apache.hadoop.io.Text;
57+
import org.junit.jupiter.api.AfterAll;
58+
import org.junit.jupiter.api.BeforeAll;
5959
import org.junit.jupiter.api.Test;
6060
import org.junit.jupiter.api.Timeout;
6161
import org.slf4j.Logger;
6262
import org.slf4j.LoggerFactory;
6363

64-
public class LargeSplitRowIT extends ConfigurableMacBase {
64+
public class LargeSplitRowIT extends SharedMiniClusterBase {
65+
66+
@BeforeAll
67+
public static void setup() throws Exception {
68+
SharedMiniClusterBase.startMiniClusterWithConfig(
69+
(cfg, coreSite) -> cfg.getClusterServerConfiguration().setNumDefaultTabletServers(1));
70+
}
71+
72+
@AfterAll
73+
public static void teardown() {
74+
SharedMiniClusterBase.stopMiniCluster();
75+
}
76+
6577
private static final Logger log = LoggerFactory.getLogger(LargeSplitRowIT.class);
6678

6779
@Override
6880
protected Duration defaultTimeout() {
6981
return Duration.ofMinutes(1);
7082
}
7183

72-
@Override
73-
public void configure(MiniAccumuloConfigImpl cfg, Configuration hadoopCoreSite) {
74-
cfg.getClusterServerConfiguration().setNumDefaultTabletServers(1);
75-
}
76-
7784
// User added split
7885
@Test
7986
public void userAddedSplit() throws Exception {
@@ -82,7 +89,7 @@ public void userAddedSplit() throws Exception {
8289

8390
// make a table and lower the TABLE_END_ROW_MAX_SIZE property
8491
final String tableName = getUniqueNames(1)[0];
85-
try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build()) {
92+
try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
8693
Map<String,String> props = Map.of(Property.TABLE_MAX_END_ROW_SIZE.getKey(), "1000");
8794
client.tableOperations().create(tableName, new NewTableConfiguration().setProperties(props));
8895

@@ -128,7 +135,7 @@ public void automaticSplitWith250Same() throws Exception {
128135

129136
// make a table and lower the configuration properties
130137
final String tableName = getUniqueNames(1)[0];
131-
try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build()) {
138+
try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
132139
// @formatter:off
133140
Map<String,String> props = Map.of(
134141
Property.TABLE_SPLIT_THRESHOLD.getKey(), "10K",
@@ -164,14 +171,16 @@ public void automaticSplitWith250Same() throws Exception {
164171

165172
// Wait for the tablet to be marked as unsplittable due to the system split running
166173
TableId tableId = TableId.of(client.tableOperations().tableIdMap().get(tableName));
167-
Wait.waitFor(() -> getServerContext().getAmple()
168-
.readTablet(new KeyExtent(tableId, null, null)).getUnSplittable() != null,
174+
Wait.waitFor(
175+
() -> getCluster().getServerContext().getAmple()
176+
.readTablet(new KeyExtent(tableId, null, null)).getUnSplittable() != null,
169177
Wait.MAX_WAIT_MILLIS, 100);
170178

171179
// Verify that the unsplittable column is read correctly
172180
TabletMetadata tm =
173-
getServerContext().getAmple().readTablet(new KeyExtent(tableId, null, null));
174-
assertEquals(tm.getUnSplittable(), SplitUtils.toUnSplittable(getServerContext(), tm));
181+
getCluster().getServerContext().getAmple().readTablet(new KeyExtent(tableId, null, null));
182+
assertEquals(tm.getUnSplittable(),
183+
SplitUtils.toUnSplittable(getCluster().getServerContext(), tm));
175184

176185
// Make sure all the data that was put in the table is still correct
177186
int count = 0;
@@ -199,7 +208,7 @@ public void automaticSplitWith250Same() throws Exception {
199208
@Timeout(60)
200209
public void automaticSplitWithGaps() throws Exception {
201210
log.info("Automatic Split With Gaps");
202-
try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build()) {
211+
try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
203212
automaticSplit(client, 30, 2);
204213
}
205214
}
@@ -209,7 +218,7 @@ public void automaticSplitWithGaps() throws Exception {
209218
@Timeout(60)
210219
public void automaticSplitWithoutGaps() throws Exception {
211220
log.info("Automatic Split Without Gaps");
212-
try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build()) {
221+
try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
213222
automaticSplit(client, 15, 1);
214223
}
215224
}
@@ -218,7 +227,7 @@ public void automaticSplitWithoutGaps() throws Exception {
218227
@Timeout(120)
219228
public void automaticSplitLater() throws Exception {
220229
log.info("Split later");
221-
try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build()) {
230+
try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
222231
// Generate large rows which have long common prefixes and therefore no split can be found.
223232
// Setting max to 1 causes all rows to have long common prefixes. Setting a max of greater
224233
// than 1 would generate a row with a short common prefix.
@@ -262,7 +271,7 @@ public void automaticSplitLater() throws Exception {
262271
@Timeout(60)
263272
public void testUnsplittableColumn() throws Exception {
264273
log.info("Unsplittable Column Test");
265-
try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build()) {
274+
try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
266275
// make a table and lower the configuration properties
267276
// @formatter:off
268277
var maxEndRow = 100;
@@ -298,15 +307,16 @@ public void testUnsplittableColumn() throws Exception {
298307

299308
// Wait for the tablets to be marked as unsplittable due to the system split running
300309
TableId tableId = TableId.of(client.tableOperations().tableIdMap().get(tableName));
301-
Wait.waitFor(() -> getServerContext().getAmple()
302-
.readTablet(new KeyExtent(tableId, null, null)).getUnSplittable() != null,
310+
Wait.waitFor(
311+
() -> getCluster().getServerContext().getAmple()
312+
.readTablet(new KeyExtent(tableId, null, null)).getUnSplittable() != null,
303313
Wait.MAX_WAIT_MILLIS, 100);
304314

305315
// Verify that the unsplittable column is read correctly
306316
TabletMetadata tm =
307-
getServerContext().getAmple().readTablet(new KeyExtent(tableId, null, null));
317+
getCluster().getServerContext().getAmple().readTablet(new KeyExtent(tableId, null, null));
308318
var unsplittable = tm.getUnSplittable();
309-
assertEquals(unsplittable, SplitUtils.toUnSplittable(getServerContext(), tm));
319+
assertEquals(unsplittable, SplitUtils.toUnSplittable(getCluster().getServerContext(), tm));
310320

311321
// Make sure no splits occurred in the table
312322
assertTrue(client.tableOperations().listSplits(tableName).isEmpty());
@@ -318,13 +328,15 @@ public void testUnsplittableColumn() throws Exception {
318328

319329
// wait for the unsplittable marker to be set to a new value due to the property change
320330
Wait.waitFor(() -> {
321-
var updatedUnsplittable = getServerContext().getAmple()
331+
var updatedUnsplittable = getCluster().getServerContext().getAmple()
322332
.readTablet(new KeyExtent(tableId, null, null)).getUnSplittable();
323333
return updatedUnsplittable != null && !updatedUnsplittable.equals(unsplittable);
324334
}, Wait.MAX_WAIT_MILLIS, 100);
325335
// recheck with the computed meta is correct after property update
326-
tm = getServerContext().getAmple().readTablet(new KeyExtent(tableId, null, null));
327-
assertEquals(tm.getUnSplittable(), SplitUtils.toUnSplittable(getServerContext(), tm));
336+
tm = getCluster().getServerContext().getAmple()
337+
.readTablet(new KeyExtent(tableId, null, null));
338+
assertEquals(tm.getUnSplittable(),
339+
SplitUtils.toUnSplittable(getCluster().getServerContext(), tm));
328340

329341
// Bump max end row size and verify split occurs and unsplittable column is cleaned up
330342
client.tableOperations().setProperty(tableName, Property.TABLE_MAX_END_ROW_SIZE.getKey(),
@@ -338,7 +350,7 @@ public void testUnsplittableColumn() throws Exception {
338350
// Verify all tablets have no unsplittable metadata column
339351
Wait.waitFor(() -> {
340352
try (var tabletsMetadata =
341-
getServerContext().getAmple().readTablets().forTable(tableId).build()) {
353+
getCluster().getServerContext().getAmple().readTablets().forTable(tableId).build()) {
342354
return tabletsMetadata.stream()
343355
.allMatch(tabletMetadata -> tabletMetadata.getUnSplittable() == null);
344356
}
@@ -355,7 +367,7 @@ public void testUnsplittableColumn() throws Exception {
355367
@Timeout(60)
356368
public void testUnsplittableCleanup() throws Exception {
357369
log.info("Unsplittable Column Cleanup");
358-
try (AccumuloClient client = Accumulo.newClient().from(getClientProperties()).build()) {
370+
try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
359371
// make a table and lower the configuration properties
360372
// @formatter:off
361373
Map<String,String> props = Map.of(
@@ -394,7 +406,7 @@ public void testUnsplittableCleanup() throws Exception {
394406
// as unsplittable due to the same end row for all keys after the default tablet is split
395407
Wait.waitFor(() -> {
396408
try (var tabletsMetadata =
397-
getServerContext().getAmple().readTablets().forTable(tableId).build()) {
409+
getCluster().getServerContext().getAmple().readTablets().forTable(tableId).build()) {
398410
return tabletsMetadata.stream().anyMatch(tm -> tm.getUnSplittable() != null);
399411
}
400412
}, Wait.MAX_WAIT_MILLIS, 100);
@@ -409,7 +421,7 @@ public void testUnsplittableCleanup() throws Exception {
409421
// same number of splits as before
410422
Wait.waitFor(() -> {
411423
try (var tabletsMetadata =
412-
getServerContext().getAmple().readTablets().forTable(tableId).build()) {
424+
getCluster().getServerContext().getAmple().readTablets().forTable(tableId).build()) {
413425
return tabletsMetadata.stream().allMatch(tm -> tm.getUnSplittable() == null);
414426
}
415427
}, Wait.MAX_WAIT_MILLIS, 100);

0 commit comments

Comments (0)