 */
 package org.apache.accumulo.test.functional;
 
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+
 import java.time.Duration;
 import java.util.HashMap;
 import java.util.Map;
 import org.apache.accumulo.core.client.AccumuloClient;
 import org.apache.accumulo.core.client.admin.NewTableConfiguration;
 import org.apache.accumulo.core.client.admin.TabletAvailability;
+import org.apache.accumulo.core.client.admin.servers.ServerId;
 import org.apache.accumulo.core.conf.Property;
+import org.apache.accumulo.core.util.Timer;
 import org.apache.accumulo.harness.AccumuloClusterHarness;
 import org.apache.accumulo.minicluster.ServerType;
 import org.apache.accumulo.test.VerifyIngest;
@@ -58,16 +62,30 @@ protected Duration defaultTimeout() {
   @BeforeEach
   public void alterConfig() throws Exception {
     try (AccumuloClient client = Accumulo.newClient().from(getClientProps()).build()) {
+      final int initialTserverCount =
+          client.instanceOperations().getServers(ServerId.Type.TABLET_SERVER).size();
+      log.info("Tserver count: {}", initialTserverCount);
+      Timer timer = Timer.startNew();
       getClusterControl().stopAllServers(ServerType.TABLET_SERVER);
+      Wait.waitFor(
+          () -> client.instanceOperations().getServers(ServerId.Type.TABLET_SERVER).isEmpty(),
+          120_000);
+      log.info("Took {} ms to stop all tservers", timer.elapsed(MILLISECONDS));
+      timer.restart();
       getClusterControl().startAllServers(ServerType.TABLET_SERVER);
+      Wait.waitFor(() -> client.instanceOperations().getServers(ServerId.Type.TABLET_SERVER).size()
+          < initialTserverCount, 120_000);
+      log.info("Took {} ms to start all tservers", timer.elapsed(MILLISECONDS));
 
       FileSystem fs = cluster.getFileSystem();
       testDir = new Path(cluster.getTemporaryPath(), "testmf");
       fs.deleteOnExit(testDir);
+
+      timer.restart();
       FunctionalTestUtils.createRFiles(client, fs, testDir.toString(), ROWS, SPLITS, 8);
+      long elapsed = timer.elapsed(MILLISECONDS);
       FileStatus[] stats = fs.listStatus(testDir);
-
-      log.info("Number of generated files: {}", stats.length);
+      log.info("Generated {} files in {} ms", stats.length, elapsed);
     }
   }
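The `alterConfig` hunk above pairs the `Timer` utility with the test `Wait` helper so that each tserver restart phase is both timed and bounded by an explicit timeout. A minimal sketch of that bounded-wait-plus-timing pattern in isolation, assuming an `AccumuloClient client`, a `ClusterControl control`, and an slf4j `log` are already in scope (these names and the simplified "any tserver is back" condition are illustrative, not part of this change):

```java
// Sketch only: stop and restart all tablet servers, timing each phase and
// bounding each wait at 120 s, mirroring the values used in the diff above.
Timer timer = Timer.startNew();
control.stopAllServers(ServerType.TABLET_SERVER);
// Poll until no live tablet servers are reported.
Wait.waitFor(
    () -> client.instanceOperations().getServers(ServerId.Type.TABLET_SERVER).isEmpty(),
    120_000);
log.info("Stopped all tservers in {} ms", timer.elapsed(MILLISECONDS));

timer.restart();
control.startAllServers(ServerType.TABLET_SERVER);
// Simplified condition: wait until at least one tablet server registers again.
Wait.waitFor(
    () -> !client.instanceOperations().getServers(ServerId.Type.TABLET_SERVER).isEmpty(),
    120_000);
log.info("Restarted tservers in {} ms", timer.elapsed(MILLISECONDS));
```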
@@ -82,24 +100,35 @@ public void resetConfig() throws Exception {
 
   @Test
   public void testBulkSplitOptimization() throws Exception {
+    log.info("Starting BulkSplitOptimizationIT test");
     try (AccumuloClient c = Accumulo.newClient().from(getClientProps()).build()) {
+
       final String tableName = getUniqueNames(1)[0];
       Map<String,String> tableProps = new HashMap<>();
       tableProps.put(Property.TABLE_MAJC_RATIO.getKey(), "1000");
       tableProps.put(Property.TABLE_FILE_MAX.getKey(), "1000");
       tableProps.put(Property.TABLE_SPLIT_THRESHOLD.getKey(), "1G");
+
+      log.info("Creating table {}", tableName);
+      Timer timer = Timer.startNew();
       c.tableOperations().create(tableName, new NewTableConfiguration().setProperties(tableProps)
           .withInitialTabletAvailability(TabletAvailability.HOSTED));
+      log.info("Created table in {} ms. Starting bulk import", timer.elapsed(MILLISECONDS));
 
-      log.info("Starting bulk import");
+      timer.restart();
       c.tableOperations().importDirectory(testDir.toString()).to(tableName).load();
+      log.info("Imported into table {} in {} ms", tableName, timer.elapsed(MILLISECONDS));
 
+      timer.restart();
       FunctionalTestUtils.checkSplits(c, tableName, 0, 0);
       FunctionalTestUtils.checkRFiles(c, tableName, 1, 1, 100, 100);
+      log.info("Checked splits and rfiles in {} ms", timer.elapsed(MILLISECONDS));
 
       log.info("Lowering split threshold to 100K to initiate splits");
       c.tableOperations().setProperty(tableName, Property.TABLE_SPLIT_THRESHOLD.getKey(), "100K");
 
+      timer.restart();
+
       // wait until over split threshold -- should be 78 splits
       Wait.waitFor(() -> {
         try {
@@ -114,6 +143,8 @@ public void testBulkSplitOptimization() throws Exception {
         return true;
       });
 
+      log.info("Took {} ms for split count to reach expected range", timer.elapsed(MILLISECONDS));
+
       VerifyParams params = new VerifyParams(getClientProps(), tableName, ROWS);
       params.timestamp = 1;
       params.dataSize = 50;
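The body of the `Wait.waitFor` predicate spanning the last two hunks is elided in this view; per the inline comment it polls until the table has split past the expected count. A hedged sketch of what such a polling predicate can look like, using the public `tableOperations().listSplits(...)` API; the target split count, the explicit timeout, and the surrounding names are illustrative assumptions rather than the PR's exact code:

```java
// Sketch only: poll until the table has accumulated more than an expected
// number of splits. `c`, `tableName`, and `log` are assumed in scope as above.
Wait.waitFor(() -> {
  try {
    int numSplits = c.tableOperations().listSplits(tableName).size();
    log.info("Current split count: {}", numSplits);
    return numSplits > 50; // illustrative threshold, not the test's real value
  } catch (Exception e) {
    // Metadata may be briefly unreadable while tablets are splitting; retry.
    return false;
  }
}, 120_000);
```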