
Commit 807eb29

tapdb: implement migration for unique index change in universe_leaves table
This commit introduces a new migration (migration 30) that modifies the unique index on the `universe_leaves` table: the unique constraint is widened from two columns (`minting_point`, `script_key_bytes`) to three columns (`minting_point`, `script_key_bytes`, `leaf_node_namespace`).

A new test case, `TestMigration30`, verifies the behavior of the database before and after the migration: inserting a duplicate row with the same `minting_point` and `script_key_bytes` but a different `leaf_node_namespace` fails before the migration and succeeds once it is applied.

The `universe_leaves` table schema in the generated SQL schema file is updated to reflect the new unique constraint. Other minor changes include new proof types in the `universe` package and new types related to ignore tuples.
1 parent dc3af8e commit 807eb29

8 files changed: +511 −14 lines
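
For orientation, the practical effect of the widened constraint can be sketched with two plain inserts (illustrative only: the IDs, foreign-key values, and SQLite-style X'..' byte literals below are hypothetical, mirroring the values used in the test rather than real data):

-- Two leaves for the same minting outpoint and script key that differ only in
-- the universe tree (namespace) they belong to. Assumes the referenced
-- genesis_assets and universe_roots rows exist.
INSERT INTO universe_leaves (
    id, asset_genesis_id, minting_point, script_key_bytes,
    universe_root_id, leaf_node_key, leaf_node_namespace
) VALUES (
    100, 1, X'0A0B0C', X'00112233445566778899AABBCCDDEEFF00112233445566778899AABBCCDDEEFF',
    999, X'AA', 'old_ns'
);

INSERT INTO universe_leaves (
    id, asset_genesis_id, minting_point, script_key_bytes,
    universe_root_id, leaf_node_key, leaf_node_namespace
) VALUES (
    101, 1, X'0A0B0C', X'00112233445566778899AABBCCDDEEFF00112233445566778899AABBCCDDEEFF',
    999, X'BB', 'test_ns'
);

-- Before migration 30, UNIQUE(minting_point, script_key_bytes) rejects the
-- second insert. After it, UNIQUE(minting_point, script_key_bytes,
-- leaf_node_namespace) accepts it, because the namespaces differ.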

tapdb/migrations_test.go

+88
@@ -2,6 +2,7 @@ package tapdb
 
 import (
 	"context"
+	"fmt"
 	"os"
 	"path/filepath"
 	"regexp"
@@ -467,3 +468,90 @@ func TestMigration29(t *testing.T) {
 	require.NoError(t, err)
 	require.Equal(t, "ignore", proofType)
 }
+
+// TestMigration30 tests the migration that changes the UNIQUE index on the
+// universe_leaves table from two columns (minting_point, script_key_bytes) to
+// three columns (minting_point, script_key_bytes, leaf_node_namespace).
+func TestMigration30(t *testing.T) {
+	ctx := context.Background()
+
+	// Create a test DB at the pre-migration state (version 29).
+	db := NewTestDBWithVersion(t, 29)
+
+	// Insert test data from file.
+	InsertTestdata(t, db.BaseDB, "migrations_test_00030_dummy_data.sql")
+
+	// Attempt to insert a duplicate leaf row (same minting_point and
+	// script_key_bytes) but with a different leaf_node_namespace "test_ns".
+	// Under the old unique constraint, this should error.
+	//
+	//nolint:lll
+	const dupLeafStmt = `
+		INSERT INTO universe_leaves (
+			id, asset_genesis_id, minting_point, script_key_bytes,
+			universe_root_id, leaf_node_key, leaf_node_namespace
+		) VALUES (
+			%d, 1, X'0A0B0C', X'00112233445566778899AABBCCDDEEFF00112233445566778899AABBCCDDEEFF',
+			999, X'BB', 'test_ns'
+		)
+	`
+
+	dupQuery := transformByteLiterals(
+		t, db.BaseDB, fmt.Sprintf(dupLeafStmt, 101),
+	)
+	_, err := db.ExecContext(ctx, dupQuery)
+	require.Error(
+		t, err,
+		"duplicate insert should fail under the old unique constraint",
+	)
+
+	// Check error message, which differs between SQLite and Postgres.
+	errMsg := err.Error()
+	switch db.Backend() {
+	case sqlc.BackendTypeSqlite:
+		require.Contains(
+			t, errMsg,
+			"constraint failed: UNIQUE constraint failed: "+
+				"universe_leaves.minting_point, "+
+				"universe_leaves.script_key_bytes",
+			"SQLite error should contain the expected unique "+
+				"constraint failure")
+	case sqlc.BackendTypePostgres:
+		require.Contains(
+			t, errMsg, "duplicate key value violates unique "+
+				"constraint", "postgres error should mention "+
+				"duplicate key violation")
+	default:
+		t.Fatalf("unknown database backend: %v", db.Backend())
+	}
+
+	// Run migration 30 (apply the up migration that updates the unique
+	// constraint).
+	err = db.ExecuteMigrations(TargetVersion(30))
+	require.NoError(t, err)
+
+	// Verify that the dummy row inserted from the testdata file was
+	// migrated properly.
+	var ns string
+	err = db.QueryRowContext(ctx, transformByteLiterals(t, db.BaseDB, `
+		SELECT leaf_node_namespace FROM universe_leaves WHERE id = 100
+	`)).Scan(&ns)
+	require.NoError(t, err)
+	require.Equal(
+		t, "old_ns", ns, "pre-existing leaf should have its namespace "+
+			"unchanged",
+	)
+
+	// Now, with the new three-column unique constraint in place, attempting
+	// to insert a row with the same minting_point and script_key_bytes but
+	// a different namespace ("test_ns") should succeed.
+	dupQuery2 := transformByteLiterals(
+		t, db.BaseDB, fmt.Sprintf(dupLeafStmt, 102),
+	)
+	_, err = db.ExecContext(ctx, dupQuery2)
+	require.NoError(
+		t, err,
+		"duplicate insert should now succeed with the new unique "+
+			"constraint",
+	)
+}
New file: migration 30 down migration (+126 lines)

@@ -0,0 +1,126 @@
+-- Migration 30 Downgrade: Revert UNIQUE constraint on universe_leaves table.
+-- =================================================================
+--
+-- OVERVIEW:
+--
+-- This downgrade script reverts the changes made in migration 30, rolling back
+-- from a three-column UNIQUE constraint (minting_point, script_key_bytes, leaf_node_namespace)
+-- to the original two-column constraint (minting_point, script_key_bytes).
+--
+-- DOWNGRADE STRATEGY:
+--
+-- Since the enhanced constraint allowed multiple entries with the same minting_point
+-- and script_key_bytes (differing only by leaf_node_namespace), we need to be
+-- selective about which rows to keep when reverting to the more restrictive constraint.
+--
+-- This script:
+-- 1. Handles foreign key dependencies (backing up federation_proof_sync_log).
+-- 2. Creates a new table with the original two-column constraint.
+-- 3. Selectively migrates data (keeping only one row per minting_point/script_key_bytes pair).
+-- 4. Replaces the current table with the downgraded version.
+-- 5. Restores dependent tables with proper references.
+--
+-- NOTE: This downgrade will result in data loss where multiple universe entries
+-- existed for the same asset across different universe trees.
+--
+
+-- ==== PHASE 1: HANDLE FOREIGN KEY DEPENDENCIES ====
+-- Before we can drop the universe_leaves table, we need to temporarily remove
+-- any foreign key references pointing to it.
+
+-- Create a temporary backup table for federation_proof_sync_log.
+CREATE TABLE new_federation_proof_sync_log (
+    id INTEGER PRIMARY KEY,
+    status TEXT NOT NULL CHECK(status IN ('pending', 'complete')),
+    timestamp TIMESTAMP NOT NULL,
+    attempt_counter BIGINT NOT NULL DEFAULT 0,
+    sync_direction TEXT NOT NULL CHECK(sync_direction IN ('push', 'pull')),
+    proof_leaf_id BIGINT NOT NULL, -- FK constraint intentionally omitted for now.
+    universe_root_id BIGINT NOT NULL REFERENCES universe_roots(id),
+    servers_id BIGINT NOT NULL REFERENCES universe_servers(id)
+);
+
+-- Backup all existing federation_proof_sync_log data.
+INSERT INTO new_federation_proof_sync_log
+SELECT * FROM federation_proof_sync_log;
+
+-- Remove the table with the foreign key constraint to universe_leaves.
+-- This allows us to safely drop universe_leaves later.
+DROP TABLE federation_proof_sync_log;
+
+-- ==== PHASE 2: CREATE NEW TABLE WITH ORIGINAL CONSTRAINT ====
+-- Create a new table with the original two-column unique constraint.
+CREATE TABLE old_universe_leaves (
+    id INTEGER PRIMARY KEY,
+    asset_genesis_id BIGINT NOT NULL REFERENCES genesis_assets(gen_asset_id),
+    minting_point BLOB NOT NULL,
+    script_key_bytes BLOB NOT NULL CHECK(LENGTH(script_key_bytes) = 32),
+    universe_root_id BIGINT NOT NULL REFERENCES universe_roots(id),
+    leaf_node_key BLOB,
+    leaf_node_namespace VARCHAR NOT NULL,
+    -- The original, more restrictive unique constraint.
+    UNIQUE(minting_point, script_key_bytes)
+);
+
+-- ==== PHASE 3: SELECTIVE DATA MIGRATION ====
+-- Copy data from the current table to the new one, but we must be selective
+-- to avoid violating the more restrictive unique constraint.
+-- For each (minting_point, script_key_bytes) group, we keep only the row with the lowest ID.
+INSERT INTO old_universe_leaves (
+    id,
+    asset_genesis_id,
+    minting_point,
+    script_key_bytes,
+    universe_root_id,
+    leaf_node_key,
+    leaf_node_namespace
+)
+SELECT ul.id,
+       ul.asset_genesis_id,
+       ul.minting_point,
+       ul.script_key_bytes,
+       ul.universe_root_id,
+       ul.leaf_node_key,
+       ul.leaf_node_namespace
+FROM universe_leaves ul
+JOIN (
+    -- This subquery identifies the lowest ID for each unique combination
+    -- of minting_point and script_key_bytes.
+    SELECT minting_point, script_key_bytes, MIN(id) AS min_id
+    FROM universe_leaves
+    GROUP BY minting_point, script_key_bytes
+) sub ON ul.id = sub.min_id;
+
+-- ==== PHASE 4: TABLE REPLACEMENT ====
+-- Remove the current table with the three-column constraint.
+DROP TABLE universe_leaves;
+
+-- Rename the new table to replace the existing one.
+ALTER TABLE old_universe_leaves RENAME TO universe_leaves;
+
+-- Recreate the indexes that existed on the original table.
+CREATE INDEX IF NOT EXISTS universe_leaves_key_idx ON universe_leaves(leaf_node_key);
+CREATE INDEX IF NOT EXISTS universe_leaves_namespace ON universe_leaves(leaf_node_namespace);
+
+-- ==== PHASE 5: RESTORE DEPENDENT TABLES ====
+-- Recreate the federation_proof_sync_log table with proper foreign key references.
+CREATE TABLE federation_proof_sync_log (
+    id INTEGER PRIMARY KEY,
+    status TEXT NOT NULL CHECK(status IN ('pending', 'complete')),
+    timestamp TIMESTAMP NOT NULL,
+    attempt_counter BIGINT NOT NULL DEFAULT 0,
+    sync_direction TEXT NOT NULL CHECK(sync_direction IN ('push', 'pull')),
+    -- Now we can safely reference the new universe_leaves table.
+    proof_leaf_id BIGINT NOT NULL REFERENCES universe_leaves(id),
+    universe_root_id BIGINT NOT NULL REFERENCES universe_roots(id),
+    servers_id BIGINT NOT NULL REFERENCES universe_servers(id)
+);
+
+-- Restore federation_proof_sync_log data, but only for leaves that still exist.
+-- Some leaves may have been dropped during the selective migration process.
+INSERT INTO federation_proof_sync_log
+SELECT * FROM new_federation_proof_sync_log
+WHERE proof_leaf_id IN (SELECT id FROM universe_leaves);
+
+-- Clean up by dropping the temporary table.
+DROP TABLE new_federation_proof_sync_log;
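
Because the downgrade above keeps only the lowest-id row per (minting_point, script_key_bytes) group, rows for additional universe trees are silently discarded. A hedged sketch of a pre-downgrade check an operator could run to preview which rows would be lost (not part of the migration itself):

-- Rows the downgrade would drop: everything except the lowest-id row in each
-- (minting_point, script_key_bytes) group.
SELECT id, leaf_node_namespace
FROM universe_leaves
WHERE id NOT IN (
    SELECT MIN(id)
    FROM universe_leaves
    GROUP BY minting_point, script_key_bytes
);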
New file: migration 30 up migration (+155 lines)

@@ -0,0 +1,155 @@
+-- Migration 30: Update UNIQUE constraint on universe_leaves table.
+-- =================================================================
+--
+-- OVERVIEW:
+--
+-- This migration modifies the UNIQUE constraint on the universe_leaves table
+-- to allow assets to exist in multiple universe trees simultaneously. We change
+-- from a two-column constraint (minting_point, script_key_bytes) to a three-column
+-- constraint (minting_point, script_key_bytes, leaf_node_namespace).
+--
+-- PROBLEM STATEMENT:
+--
+-- In the current schema, the universe_leaves table enforces uniqueness based on
+-- minting_point and script_key_bytes. This design assumes an asset belongs to only
+-- one type of universe tree (transfer or issuance). However, with the introduction
+-- of "ignore" and "burn" universe trees, the same asset might need to exist in
+-- multiple trees simultaneously.
+--
+-- SOLUTION:
+--
+-- We expand the unique constraint to include the leaf_node_namespace column. This
+-- additional dimension allows distinguishing between assets based on which universe
+-- tree they belong to, while still preventing duplicates within the same tree.
+--
+-- MIGRATION STRATEGY:
+--
+-- Since SQLite has limited ALTER TABLE capabilities and we need to maintain
+-- compatibility with both SQLite and PostgreSQL, we use a table recreation approach:
+--
+-- 1. Handle foreign key dependencies (back up and remove federation_proof_sync_log).
+-- 2. Create a new table with the updated constraint.
+-- 3. Copy existing data.
+-- 4. Replace the old table with the new one.
+-- 5. Restore dependent tables with proper references.
+--
+-- This approach works with both database engines while preserving data integrity.
+--
+
+-- ==== PHASE 1: HANDLE FOREIGN KEY DEPENDENCIES ====
+-- Before we can drop the universe_leaves table, we need to temporarily remove
+-- any foreign key references pointing to it. The federation_proof_sync_log table
+-- has a foreign key to universe_leaves.id that would prevent dropping the table.
+
+-- Create a temporary backup table for federation_proof_sync_log.
+CREATE TABLE new_federation_proof_sync_log (
+    id INTEGER PRIMARY KEY,
+    status TEXT NOT NULL CHECK(status IN ('pending', 'complete')),
+    timestamp TIMESTAMP NOT NULL,
+    attempt_counter BIGINT NOT NULL DEFAULT 0,
+    sync_direction TEXT NOT NULL CHECK(sync_direction IN ('push', 'pull')),
+    proof_leaf_id BIGINT NOT NULL, -- FK constraint intentionally omitted for now.
+    universe_root_id BIGINT NOT NULL REFERENCES universe_roots(id),
+    servers_id BIGINT NOT NULL REFERENCES universe_servers(id)
+);
+
+
+-- Backup all existing federation_proof_sync_log data.
+INSERT INTO new_federation_proof_sync_log
+SELECT * FROM federation_proof_sync_log;
+
+-- Remove the table with the foreign key constraint to universe_leaves.
+-- This allows us to safely drop universe_leaves later.
+DROP TABLE federation_proof_sync_log;
+DROP INDEX IF EXISTS federation_proof_sync_log_unique_index_proof_leaf_id_servers_id;
+
+-- ==== PHASE 2: CREATE NEW TABLE WITH UPDATED CONSTRAINT ====
+-- Create a new universe_leaves table with the enhanced 3-column unique constraint.
+CREATE TABLE new_universe_leaves (
+    id INTEGER PRIMARY KEY,
+    asset_genesis_id BIGINT NOT NULL REFERENCES genesis_assets(gen_asset_id),
+    minting_point BLOB NOT NULL,
+    script_key_bytes BLOB NOT NULL CHECK(LENGTH(script_key_bytes) = 32),
+    universe_root_id BIGINT NOT NULL REFERENCES universe_roots(id),
+    leaf_node_key BLOB,
+    leaf_node_namespace VARCHAR NOT NULL,
+    UNIQUE(minting_point, script_key_bytes, leaf_node_namespace)
+);
+
+-- ==== PHASE 3: MIGRATE DATA ====
+-- Copy all existing data from the original table to the new one.
+INSERT INTO new_universe_leaves (
+    id,
+    asset_genesis_id,
+    minting_point,
+    script_key_bytes,
+    universe_root_id,
+    leaf_node_key,
+    leaf_node_namespace
+)
+SELECT
+    id,
+    asset_genesis_id,
+    minting_point,
+    script_key_bytes,
+    universe_root_id,
+    leaf_node_key,
+    leaf_node_namespace
+FROM universe_leaves;
+
+-- ==== PHASE 4: TABLE REPLACEMENT ====
+-- Now that data is safely copied, remove the old table.
+DROP TABLE universe_leaves;
+
+-- Rename the new table to replace the old one.
+ALTER TABLE new_universe_leaves RENAME TO universe_leaves;
+
+-- Recreate indexes that existed on the original table.
+CREATE INDEX IF NOT EXISTS universe_leaves_key_idx ON universe_leaves(leaf_node_key);
+CREATE INDEX IF NOT EXISTS universe_leaves_namespace ON universe_leaves(leaf_node_namespace);
+
+-- ==== PHASE 5: RESTORE DEPENDENT TABLES ====
+-- Recreate the federation_proof_sync_log table with proper foreign key references.
+CREATE TABLE federation_proof_sync_log (
+    id INTEGER PRIMARY KEY,
+
+    -- The status of the proof sync attempt.
+    status TEXT NOT NULL CHECK(status IN ('pending', 'complete')),
+
+    -- The timestamp of when the log entry for the associated proof was last
+    -- updated.
+    timestamp TIMESTAMP NOT NULL,
+
+    -- The number of attempts that have been made to sync the proof.
+    attempt_counter BIGINT NOT NULL DEFAULT 0,
+
+    -- The direction of the proof sync attempt.
+    sync_direction TEXT NOT NULL CHECK(sync_direction IN ('push', 'pull')),
+
+    -- The ID of the subject proof leaf.
+    proof_leaf_id BIGINT NOT NULL REFERENCES universe_leaves(id),
+
+    -- The ID of the universe that the proof leaf belongs to.
+    universe_root_id BIGINT NOT NULL REFERENCES universe_roots(id),
+
+    -- The ID of the server that the proof will be/was synced to.
+    servers_id BIGINT NOT NULL REFERENCES universe_servers(id)
+);
+
+-- Restore valid federation_proof_sync_log data.
+-- Only reinsert records that reference existing leaves in the universe_leaves table.
+INSERT INTO federation_proof_sync_log
+SELECT * FROM new_federation_proof_sync_log
+WHERE proof_leaf_id IN (SELECT id FROM universe_leaves);
+
+-- Clean up by dropping the temporary table.
+DROP TABLE new_federation_proof_sync_log;
+
+-- Re-create the unique index on the federation_proof_sync_log table.
+CREATE UNIQUE INDEX federation_proof_sync_log_unique_index_proof_leaf_id_servers_id
+ON federation_proof_sync_log (
+    sync_direction,
+    proof_leaf_id,
+    universe_root_id,
+    servers_id
+);
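
A quick post-migration sanity check (illustrative, not part of the migration files): both backends expose the rebuilt table's definition, so the three-column constraint can be confirmed directly.

-- SQLite: the stored DDL should contain
-- UNIQUE(minting_point, script_key_bytes, leaf_node_namespace).
SELECT sql FROM sqlite_master WHERE type = 'table' AND name = 'universe_leaves';

-- Postgres: the unique index backing the constraint should list all three
-- columns in its definition.
SELECT indexname, indexdef FROM pg_indexes WHERE tablename = 'universe_leaves';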

tapdb/sqlc/queries/universe.sql

+3 −2

@@ -43,11 +43,12 @@ INSERT INTO universe_leaves (
 ) VALUES (
     @asset_genesis_id, @script_key_bytes, @universe_root_id, @leaf_node_key,
     @leaf_node_namespace, @minting_point
-) ON CONFLICT (minting_point, script_key_bytes)
+) ON CONFLICT (minting_point, script_key_bytes, leaf_node_namespace)
 -- This is a NOP, minting_point and script_key_bytes are the unique fields
 -- that caused the conflict.
 DO UPDATE SET minting_point = EXCLUDED.minting_point,
-    script_key_bytes = EXCLUDED.script_key_bytes;
+    script_key_bytes = EXCLUDED.script_key_bytes,
+    leaf_node_namespace = EXCLUDED.leaf_node_namespace;
 
 -- name: DeleteUniverseLeaves :exec
 DELETE FROM universe_leaves
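
With the conflict target widened to all three columns, the upsert above only takes the (intentionally no-op) DO UPDATE branch on an exact (minting_point, script_key_bytes, leaf_node_namespace) match; the same outpoint and script key under a different namespace now inserts a fresh row. A hedged illustration with hypothetical literal values standing in for the named parameters (SQLite-style byte literals):

-- Running this once inserts the leaf; running it again verbatim hits the
-- conflict path and performs the no-op update. Changing only
-- leaf_node_namespace creates a second leaf for the same outpoint/script key.
INSERT INTO universe_leaves (
    asset_genesis_id, script_key_bytes, universe_root_id, leaf_node_key,
    leaf_node_namespace, minting_point
) VALUES (
    1, X'00112233445566778899AABBCCDDEEFF00112233445566778899AABBCCDDEEFF',
    999, X'BB', 'transfer_ns', X'0A0B0C'
) ON CONFLICT (minting_point, script_key_bytes, leaf_node_namespace)
DO UPDATE SET minting_point = EXCLUDED.minting_point,
    script_key_bytes = EXCLUDED.script_key_bytes,
    leaf_node_namespace = EXCLUDED.leaf_node_namespace;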

0 commit comments
