// Verifies UPDATE on a Hudi table that declares no primary key, across both
// table types (COW, MOR) and both partitioned and non-partitioned layouts.
// NOTE(review): reconstructed from a mangled diff paste — column order and
// partition clause follow the "+" (new) side of the diff; confirm against the
// committed file.
test("Test Update Table Without Primary Key") {
  withRecordType()(withTempDir { tmp =>
    Seq("cow", "mor").foreach { tableType =>
      // Exercise both a partitioned and a non-partitioned table for each type.
      Seq(true, false).foreach { isPartitioned =>
        val tableName = generateTableName
        // `name` is the last column so it can double as the partition column.
        val partitionedClause = if (isPartitioned) {
          "PARTITIONED BY (name)"
        } else {
          ""
        }
        // create table — no primaryKey tblproperty on purpose; only the
        // preCombine field is declared.
        spark.sql(
          s"""
             |create table $tableName (
             |  id int,
             |  price double,
             |  ts long,
             |  name string
             |) using hudi
             | location '${tmp.getCanonicalPath}/$tableName'
             | tblproperties (
             |  type = '$tableType',
             |  preCombineField = 'ts'
             | )
             | $partitionedClause
       """.stripMargin)

        // insert data to table (values follow the id, price, ts, name order)
        spark.sql(s"insert into $tableName select 1, 10, 1000, 'a1'")
        checkAnswer(s"select id, name, price, ts from $tableName")(
          Seq(1, "a1", 10.0, 1000)
        )

        // test with optimized sql writes enabled.
        spark.sql(s"set ${SPARK_SQL_OPTIMIZED_WRITES.key()}=true")

        // update data — literal assignment
        spark.sql(s"update $tableName set price = 20 where id = 1")
        checkAnswer(s"select id, name, price, ts from $tableName")(
          Seq(1, "a1", 20.0, 1000)
        )

        // update data — self-referencing expression (price doubles to 40.0)
        spark.sql(s"update $tableName set price = price * 2 where id = 1")
        checkAnswer(s"select id, name, price, ts from $tableName")(
          Seq(1, "a1", 40.0, 1000)
        )

        // verify default compaction w/ MOR: three more updates push the
        // delta-commit count past the default compaction trigger.
        if (tableType.equals(HoodieTableType.MERGE_ON_READ)) {
          spark.sql(s"update $tableName set price = price * 2 where id = 1")
          spark.sql(s"update $tableName set price = price * 2 where id = 1")
          spark.sql(s"update $tableName set price = price * 2 where id = 1")
          // verify compaction is complete: the last valid action on the
          // timeline must be a full "commit", not a delta commit.
          val metaClient = createMetaClient(spark, tmp.getCanonicalPath + "/" + tableName)
          assertEquals(metaClient.getActiveTimeline.getLastCommitMetadataWithValidData.get.getLeft.getAction, "commit")
        }
      }
    }
  })
}