diff --git a/crates/squawk_ide/src/expand_selection.rs b/crates/squawk_ide/src/expand_selection.rs
index f39260d0..0a6eb3a9 100644
--- a/crates/squawk_ide/src/expand_selection.rs
+++ b/crates/squawk_ide/src/expand_selection.rs
@@ -41,6 +41,7 @@ const DELIMITED_LIST_KINDS: &[SyntaxKind] = &[
     SyntaxKind::COLUMN_LIST,
     SyntaxKind::CONFLICT_INDEX_ITEM_LIST,
     SyntaxKind::CONSTRAINT_EXCLUSION_LIST,
+    SyntaxKind::COPY_OPTION_LIST,
     SyntaxKind::DROP_OP_CLASS_OPTION_LIST,
     SyntaxKind::FDW_OPTION_LIST,
     SyntaxKind::FUNCTION_SIG_LIST,
@@ -51,6 +52,7 @@ const DELIMITED_LIST_KINDS: &[SyntaxKind] = &[
     SyntaxKind::OP_SIG_LIST,
     SyntaxKind::PARAM_LIST,
     SyntaxKind::PARTITION_ITEM_LIST,
+    SyntaxKind::PARTITION_LIST,
     SyntaxKind::RETURNING_OPTION_LIST,
     SyntaxKind::REVOKE_COMMAND_LIST,
     SyntaxKind::ROLE_LIST,
diff --git a/crates/squawk_parser/src/generated/syntax_kind.rs b/crates/squawk_parser/src/generated/syntax_kind.rs
index 5366e75c..c22ad4c6 100644
--- a/crates/squawk_parser/src/generated/syntax_kind.rs
+++ b/crates/squawk_parser/src/generated/syntax_kind.rs
@@ -211,6 +211,7 @@ pub enum SyntaxKind {
     HOUR_KW,
     IDENTITY_KW,
     IF_KW,
+    IGNORE_KW,
     ILIKE_KW,
     IMMEDIATE_KW,
     IMMUTABLE_KW,
@@ -278,6 +279,7 @@ pub enum SyntaxKind {
     LOCK_KW,
     LOCKED_KW,
     LOGGED_KW,
+    LSN_KW,
     MAPPING_KW,
     MATCH_KW,
     MATCHED_KW,
@@ -346,6 +348,7 @@ pub enum SyntaxKind {
     PARSER_KW,
     PARTIAL_KW,
     PARTITION_KW,
+    PARTITIONS_KW,
     PASSING_KW,
     PASSWORD_KW,
     PATH_KW,
@@ -387,6 +390,7 @@ pub enum SyntaxKind {
     REPLACE_KW,
     REPLICA_KW,
     RESET_KW,
+    RESPECT_KW,
     RESTART_KW,
     RESTRICT_KW,
     RETURN_KW,
@@ -429,6 +433,7 @@ pub enum SyntaxKind {
     SNAPSHOT_KW,
     SOME_KW,
     SOURCE_KW,
+    SPLIT_KW,
     SQL_KW,
     STABLE_KW,
     STANDALONE_KW,
@@ -503,6 +508,7 @@ pub enum SyntaxKind {
     VIEWS_KW,
     VIRTUAL_KW,
     VOLATILE_KW,
+    WAIT_KW,
     WHEN_KW,
     WHERE_KW,
     WHITESPACE_KW,
@@ -647,6 +653,8 @@ pub enum SyntaxKind {
     CONSTRAINT_INDEX_METHOD,
     CONSTRAINT_INDEX_TABLESPACE,
     COPY,
+    COPY_OPTION,
+    COPY_OPTION_LIST,
     COST_FUNC_OPTION,
     CREATE_ACCESS_METHOD,
     CREATE_AGGREGATE,
@@ -905,6 +913,7 @@ pub enum SyntaxKind {
     MERGE_DELETE,
     MERGE_DO_NOTHING,
     MERGE_INSERT,
+    MERGE_PARTITIONS,
     MERGE_UPDATE,
     MERGE_WHEN_MATCHED,
     MERGE_WHEN_NOT_MATCHED_SOURCE,
@@ -969,6 +978,7 @@ pub enum SyntaxKind {
     PARAM_VARIADIC,
     PAREN_EXPR,
     PAREN_SELECT,
+    PARTITION,
     PARTITION_BY,
     PARTITION_DEFAULT,
     PARTITION_FOR_VALUES_FROM,
@@ -976,6 +986,7 @@ pub enum SyntaxKind {
     PARTITION_FOR_VALUES_WITH,
     PARTITION_ITEM,
     PARTITION_ITEM_LIST,
+    PARTITION_LIST,
     PARTITION_OF,
     PATH,
     PATH_SEGMENT,
@@ -1090,6 +1101,7 @@ pub enum SyntaxKind {
     SORT_DESC,
     SORT_USING,
     SOURCE_FILE,
+    SPLIT_PARTITION,
     STORAGE,
     STRICT_FUNC_OPTION,
     SUBSTRING_FN,
@@ -1529,6 +1541,8 @@ impl SyntaxKind {
             SyntaxKind::IDENTITY_KW
         } else if ident.eq_ignore_ascii_case("if") {
             SyntaxKind::IF_KW
+        } else if ident.eq_ignore_ascii_case("ignore") {
+            SyntaxKind::IGNORE_KW
         } else if ident.eq_ignore_ascii_case("ilike") {
             SyntaxKind::ILIKE_KW
         } else if ident.eq_ignore_ascii_case("immediate") {
@@ -1663,6 +1677,8 @@
             SyntaxKind::LOCKED_KW
         } else if ident.eq_ignore_ascii_case("logged") {
             SyntaxKind::LOGGED_KW
+        } else if ident.eq_ignore_ascii_case("lsn") {
+            SyntaxKind::LSN_KW
         } else if ident.eq_ignore_ascii_case("mapping") {
             SyntaxKind::MAPPING_KW
         } else if ident.eq_ignore_ascii_case("match") {
@@ -1799,6 +1815,8 @@
             SyntaxKind::PARTIAL_KW
         } else if ident.eq_ignore_ascii_case("partition") {
             SyntaxKind::PARTITION_KW
+        } else if ident.eq_ignore_ascii_case("partitions") {
+            SyntaxKind::PARTITIONS_KW
         } else if ident.eq_ignore_ascii_case("passing") {
             SyntaxKind::PASSING_KW
         } else if ident.eq_ignore_ascii_case("password") {
@@ -1881,6 +1899,8 @@
             SyntaxKind::REPLICA_KW
         } else if ident.eq_ignore_ascii_case("reset") {
             SyntaxKind::RESET_KW
+        } else if ident.eq_ignore_ascii_case("respect") {
+            SyntaxKind::RESPECT_KW
         } else if ident.eq_ignore_ascii_case("restart") {
             SyntaxKind::RESTART_KW
         } else if ident.eq_ignore_ascii_case("restrict") {
@@ -1965,6 +1985,8 @@
             SyntaxKind::SOME_KW
         } else if ident.eq_ignore_ascii_case("source") {
             SyntaxKind::SOURCE_KW
+        } else if ident.eq_ignore_ascii_case("split") {
+            SyntaxKind::SPLIT_KW
         } else if ident.eq_ignore_ascii_case("sql") {
             SyntaxKind::SQL_KW
         } else if ident.eq_ignore_ascii_case("stable") {
@@ -2113,6 +2135,8 @@
             SyntaxKind::VIRTUAL_KW
         } else if ident.eq_ignore_ascii_case("volatile") {
             SyntaxKind::VOLATILE_KW
+        } else if ident.eq_ignore_ascii_case("wait") {
+            SyntaxKind::WAIT_KW
         } else if ident.eq_ignore_ascii_case("when") {
             SyntaxKind::WHEN_KW
         } else if ident.eq_ignore_ascii_case("where") {
diff --git a/crates/squawk_parser/src/generated/token_sets.rs b/crates/squawk_parser/src/generated/token_sets.rs
index e7c441a9..d257ef1b 100644
--- a/crates/squawk_parser/src/generated/token_sets.rs
+++ b/crates/squawk_parser/src/generated/token_sets.rs
@@ -132,6 +132,7 @@ pub(crate) const COLUMN_OR_TABLE_KEYWORDS: TokenSet = TokenSet::new(&[
    SyntaxKind::HOUR_KW,
    SyntaxKind::IDENTITY_KW,
    SyntaxKind::IF_KW,
+    SyntaxKind::IGNORE_KW,
    SyntaxKind::IMMEDIATE_KW,
    SyntaxKind::IMMUTABLE_KW,
    SyntaxKind::IMPLICIT_KW,
@@ -183,6 +184,7 @@ pub(crate) const COLUMN_OR_TABLE_KEYWORDS: TokenSet = TokenSet::new(&[
    SyntaxKind::LOCK_KW,
    SyntaxKind::LOCKED_KW,
    SyntaxKind::LOGGED_KW,
+    SyntaxKind::LSN_KW,
    SyntaxKind::MAPPING_KW,
    SyntaxKind::MATCH_KW,
    SyntaxKind::MATCHED_KW,
@@ -240,6 +242,7 @@ pub(crate) const COLUMN_OR_TABLE_KEYWORDS: TokenSet = TokenSet::new(&[
    SyntaxKind::PARSER_KW,
    SyntaxKind::PARTIAL_KW,
    SyntaxKind::PARTITION_KW,
+    SyntaxKind::PARTITIONS_KW,
    SyntaxKind::PASSING_KW,
    SyntaxKind::PASSWORD_KW,
    SyntaxKind::PATH_KW,
@@ -278,6 +281,7 @@ pub(crate) const COLUMN_OR_TABLE_KEYWORDS: TokenSet = TokenSet::new(&[
    SyntaxKind::REPLACE_KW,
    SyntaxKind::REPLICA_KW,
    SyntaxKind::RESET_KW,
+    SyntaxKind::RESPECT_KW,
    SyntaxKind::RESTART_KW,
    SyntaxKind::RESTRICT_KW,
    SyntaxKind::RETURN_KW,
@@ -314,6 +318,7 @@ pub(crate) const COLUMN_OR_TABLE_KEYWORDS: TokenSet = TokenSet::new(&[
    SyntaxKind::SMALLINT_KW,
    SyntaxKind::SNAPSHOT_KW,
    SyntaxKind::SOURCE_KW,
+    SyntaxKind::SPLIT_KW,
    SyntaxKind::SQL_KW,
    SyntaxKind::STABLE_KW,
    SyntaxKind::STANDALONE_KW,
@@ -374,6 +379,7 @@ pub(crate) const COLUMN_OR_TABLE_KEYWORDS: TokenSet = TokenSet::new(&[
    SyntaxKind::VIEWS_KW,
    SyntaxKind::VIRTUAL_KW,
    SyntaxKind::VOLATILE_KW,
+    SyntaxKind::WAIT_KW,
    SyntaxKind::WHITESPACE_KW,
    SyntaxKind::WITHIN_KW,
    SyntaxKind::WITHOUT_KW,
@@ -536,6 +542,7 @@ pub(crate) const TYPE_KEYWORDS: TokenSet = TokenSet::new(&[
    SyntaxKind::HOUR_KW,
    SyntaxKind::IDENTITY_KW,
    SyntaxKind::IF_KW,
+    SyntaxKind::IGNORE_KW,
    SyntaxKind::ILIKE_KW,
    SyntaxKind::IMMEDIATE_KW,
    SyntaxKind::IMMUTABLE_KW,
@@ -594,6 +601,7 @@ pub(crate) const TYPE_KEYWORDS: TokenSet = TokenSet::new(&[
    SyntaxKind::LOCK_KW,
    SyntaxKind::LOCKED_KW,
    SyntaxKind::LOGGED_KW,
+    SyntaxKind::LSN_KW,
    SyntaxKind::MAPPING_KW,
    SyntaxKind::MATCH_KW,
    SyntaxKind::MATCHED_KW,
@@ -655,6 +663,7 @@ pub(crate) const TYPE_KEYWORDS: TokenSet = TokenSet::new(&[
    SyntaxKind::PARSER_KW,
    SyntaxKind::PARTIAL_KW,
    SyntaxKind::PARTITION_KW,
+    SyntaxKind::PARTITIONS_KW,
    SyntaxKind::PASSING_KW,
    SyntaxKind::PASSWORD_KW,
    SyntaxKind::PATH_KW,
@@ -693,6 +702,7 @@ pub(crate) const TYPE_KEYWORDS: TokenSet = TokenSet::new(&[
    SyntaxKind::REPLACE_KW,
    SyntaxKind::REPLICA_KW,
    SyntaxKind::RESET_KW,
+    SyntaxKind::RESPECT_KW,
    SyntaxKind::RESTART_KW,
    SyntaxKind::RESTRICT_KW,
    SyntaxKind::RETURN_KW,
@@ -731,6 +741,7 @@ pub(crate) const TYPE_KEYWORDS: TokenSet = TokenSet::new(&[
    SyntaxKind::SMALLINT_KW,
    SyntaxKind::SNAPSHOT_KW,
    SyntaxKind::SOURCE_KW,
+    SyntaxKind::SPLIT_KW,
    SyntaxKind::SQL_KW,
    SyntaxKind::STABLE_KW,
    SyntaxKind::STANDALONE_KW,
@@ -793,6 +804,7 @@ pub(crate) const TYPE_KEYWORDS: TokenSet = TokenSet::new(&[
    SyntaxKind::VIEWS_KW,
    SyntaxKind::VIRTUAL_KW,
    SyntaxKind::VOLATILE_KW,
+    SyntaxKind::WAIT_KW,
    SyntaxKind::WHITESPACE_KW,
    SyntaxKind::WITHIN_KW,
    SyntaxKind::WITHOUT_KW,
@@ -994,6 +1006,7 @@ pub(crate) const ALL_KEYWORDS: TokenSet = TokenSet::new(&[
    SyntaxKind::HOUR_KW,
    SyntaxKind::IDENTITY_KW,
    SyntaxKind::IF_KW,
+    SyntaxKind::IGNORE_KW,
    SyntaxKind::ILIKE_KW,
    SyntaxKind::IMMEDIATE_KW,
    SyntaxKind::IMMUTABLE_KW,
@@ -1061,6 +1074,7 @@ pub(crate) const ALL_KEYWORDS: TokenSet = TokenSet::new(&[
    SyntaxKind::LOCK_KW,
    SyntaxKind::LOCKED_KW,
    SyntaxKind::LOGGED_KW,
+    SyntaxKind::LSN_KW,
    SyntaxKind::MAPPING_KW,
    SyntaxKind::MATCH_KW,
    SyntaxKind::MATCHED_KW,
@@ -1129,6 +1143,7 @@ pub(crate) const ALL_KEYWORDS: TokenSet = TokenSet::new(&[
    SyntaxKind::PARSER_KW,
    SyntaxKind::PARTIAL_KW,
    SyntaxKind::PARTITION_KW,
+    SyntaxKind::PARTITIONS_KW,
    SyntaxKind::PASSING_KW,
    SyntaxKind::PASSWORD_KW,
    SyntaxKind::PATH_KW,
@@ -1170,6 +1185,7 @@ pub(crate) const ALL_KEYWORDS: TokenSet = TokenSet::new(&[
    SyntaxKind::REPLACE_KW,
    SyntaxKind::REPLICA_KW,
    SyntaxKind::RESET_KW,
+    SyntaxKind::RESPECT_KW,
    SyntaxKind::RESTART_KW,
    SyntaxKind::RESTRICT_KW,
    SyntaxKind::RETURN_KW,
@@ -1212,6 +1228,7 @@ pub(crate) const ALL_KEYWORDS: TokenSet = TokenSet::new(&[
    SyntaxKind::SNAPSHOT_KW,
    SyntaxKind::SOME_KW,
    SyntaxKind::SOURCE_KW,
+    SyntaxKind::SPLIT_KW,
    SyntaxKind::SQL_KW,
    SyntaxKind::STABLE_KW,
    SyntaxKind::STANDALONE_KW,
@@ -1286,6 +1303,7 @@ pub(crate) const ALL_KEYWORDS: TokenSet = TokenSet::new(&[
    SyntaxKind::VIEWS_KW,
    SyntaxKind::VIRTUAL_KW,
    SyntaxKind::VOLATILE_KW,
+    SyntaxKind::WAIT_KW,
    SyntaxKind::WHEN_KW,
    SyntaxKind::WHERE_KW,
    SyntaxKind::WHITESPACE_KW,
@@ -1539,6 +1557,7 @@ pub(crate) const BARE_LABEL_KEYWORDS: TokenSet = TokenSet::new(&[
    SyntaxKind::LOCK_KW,
    SyntaxKind::LOCKED_KW,
    SyntaxKind::LOGGED_KW,
+    SyntaxKind::LSN_KW,
    SyntaxKind::MAPPING_KW,
    SyntaxKind::MATCH_KW,
    SyntaxKind::MATCHED_KW,
@@ -1599,6 +1618,7 @@ pub(crate) const BARE_LABEL_KEYWORDS: TokenSet = TokenSet::new(&[
    SyntaxKind::PARSER_KW,
    SyntaxKind::PARTIAL_KW,
    SyntaxKind::PARTITION_KW,
+    SyntaxKind::PARTITIONS_KW,
    SyntaxKind::PASSING_KW,
    SyntaxKind::PASSWORD_KW,
    SyntaxKind::PATH_KW,
@@ -1679,6 +1699,7 @@ pub(crate) const BARE_LABEL_KEYWORDS: TokenSet = TokenSet::new(&[
    SyntaxKind::SNAPSHOT_KW,
    SyntaxKind::SOME_KW,
    SyntaxKind::SOURCE_KW,
+    SyntaxKind::SPLIT_KW,
    SyntaxKind::SQL_KW,
    SyntaxKind::STABLE_KW,
    SyntaxKind::STANDALONE_KW,
@@ -1750,6 +1771,7 @@ pub(crate) const BARE_LABEL_KEYWORDS: TokenSet = TokenSet::new(&[
    SyntaxKind::VIEWS_KW,
    SyntaxKind::VIRTUAL_KW,
    SyntaxKind::VOLATILE_KW,
+    SyntaxKind::WAIT_KW,
    SyntaxKind::WHEN_KW,
    SyntaxKind::WHITESPACE_KW,
    SyntaxKind::WORK_KW,
@@ -1888,6 +1910,7 @@ pub(crate) const UNRESERVED_KEYWORDS: TokenSet = TokenSet::new(&[
    SyntaxKind::HOUR_KW,
    SyntaxKind::IDENTITY_KW,
    SyntaxKind::IF_KW,
+    SyntaxKind::IGNORE_KW,
    SyntaxKind::IMMEDIATE_KW,
    SyntaxKind::IMMUTABLE_KW,
    SyntaxKind::IMPLICIT_KW,
@@ -1923,6 +1946,7 @@ pub(crate) const UNRESERVED_KEYWORDS: TokenSet = TokenSet::new(&[
    SyntaxKind::LOCK_KW,
    SyntaxKind::LOCKED_KW,
    SyntaxKind::LOGGED_KW,
+    SyntaxKind::LSN_KW,
    SyntaxKind::MAPPING_KW,
    SyntaxKind::MATCH_KW,
    SyntaxKind::MATCHED_KW,
@@ -1971,6 +1995,7 @@ pub(crate) const UNRESERVED_KEYWORDS: TokenSet = TokenSet::new(&[
    SyntaxKind::PARSER_KW,
    SyntaxKind::PARTIAL_KW,
    SyntaxKind::PARTITION_KW,
+    SyntaxKind::PARTITIONS_KW,
    SyntaxKind::PASSING_KW,
    SyntaxKind::PASSWORD_KW,
    SyntaxKind::PATH_KW,
@@ -2006,6 +2031,7 @@ pub(crate) const UNRESERVED_KEYWORDS: TokenSet = TokenSet::new(&[
    SyntaxKind::REPLACE_KW,
    SyntaxKind::REPLICA_KW,
    SyntaxKind::RESET_KW,
+    SyntaxKind::RESPECT_KW,
    SyntaxKind::RESTART_KW,
    SyntaxKind::RESTRICT_KW,
    SyntaxKind::RETURN_KW,
@@ -2039,6 +2065,7 @@ pub(crate) const UNRESERVED_KEYWORDS: TokenSet = TokenSet::new(&[
    SyntaxKind::SKIP_KW,
    SyntaxKind::SNAPSHOT_KW,
    SyntaxKind::SOURCE_KW,
+    SyntaxKind::SPLIT_KW,
    SyntaxKind::SQL_KW,
    SyntaxKind::STABLE_KW,
    SyntaxKind::STANDALONE_KW,
@@ -2092,6 +2119,7 @@ pub(crate) const UNRESERVED_KEYWORDS: TokenSet = TokenSet::new(&[
    SyntaxKind::VIEWS_KW,
    SyntaxKind::VIRTUAL_KW,
    SyntaxKind::VOLATILE_KW,
+    SyntaxKind::WAIT_KW,
    SyntaxKind::WHITESPACE_KW,
    SyntaxKind::WITHIN_KW,
    SyntaxKind::WITHOUT_KW,
diff --git a/crates/squawk_parser/src/grammar.rs b/crates/squawk_parser/src/grammar.rs
index 8eba8568..4985ec88 100644
--- a/crates/squawk_parser/src/grammar.rs
+++ b/crates/squawk_parser/src/grammar.rs
@@ -1848,6 +1848,8 @@ fn opt_type_name_with(p: &mut Parser<'_>, type_args_enabled: bool) -> Option<CompletedMarker> {
             p.bump(BIT_KW);
@@ -2162,10 +2164,13 @@ fn opt_filter_clause(p: &mut Parser<'_>) {
 }
 
 fn opt_over_clause(p: &mut Parser<'_>) {
-    if p.at(OVER_KW) {
+    if p.at(OVER_KW) || p.at(RESPECT_KW) || p.at(IGNORE_KW) {
         // OVER window_name
         // OVER ( window_definition )
         let m = p.start();
+        if p.eat(RESPECT_KW) || p.eat(IGNORE_KW) {
+            p.expect(NULLS_KW);
+        }
         p.expect(OVER_KW);
         if p.eat(L_PAREN) {
             window_spec(p);
@@ -7820,7 +7825,11 @@ fn alter_trigger(p: &mut Parser<'_>) -> CompletedMarker {
     if p.at(RENAME_KW) {
         rename_to(p);
     } else {
-        depends_on_extension(p);
+        if p.at(NO_KW) || p.at(DEPENDS_KW) {
+            depends_on_extension(p);
+        } else {
+            p.error(format!("expected NO or DEPENDS, found {:?}", p.current()));
+        }
     }
     m.complete(p, ALTER_TRIGGER)
 }
@@ -9111,8 +9120,11 @@ fn create_publication(p: &mut Parser<'_>) -> CompletedMarker {
     p.bump(PUBLICATION_KW);
     name(p);
     if p.eat(FOR_KW) {
-        if p.eat(ALL_KW) {
-            p.expect(TABLES_KW);
+        if p.at(ALL_KW) {
+            publication_all_object(p);
+            while !p.at(EOF) && p.eat(COMMA) {
+                publication_all_object(p);
+            }
         } else {
             publication_object(p);
             while !p.at(EOF) && p.eat(COMMA) {
@@ -9124,6 +9136,16 @@
     m.complete(p, CREATE_PUBLICATION)
 }
 
+fn publication_all_object(p: &mut Parser<'_>) {
+    p.expect(ALL_KW);
+    if !p.eat(TABLES_KW) && !p.eat(SEQUENCES_KW) {
+        p.error(format!(
+            "expected TABLES or SEQUENCES, got {:?}",
+            p.current()
+        ));
+    }
+}
+
 // CREATE ROLE name [ [ WITH ] option [ ... ] ]
 // where option can be:
 //     SUPERUSER | NOSUPERUSER
@@ -11729,20 +11751,24 @@ fn opt_vacuum_option(p: &mut Parser<'_>) -> Option<CompletedMarker> {
 //   | '(' copy_generic_opt_arg_list ')'
 //   | /* EMPTY */
 fn opt_copy_option(p: &mut Parser) -> bool {
-    col_label(p);
+    let m = p.start();
+    if !opt_col_label(p) {
+        m.abandon(p);
+        return false;
+    }
     copy_option_arg(p);
+    m.complete(p, COPY_OPTION);
     true
 }
 
 fn copy_option_arg(p: &mut Parser<'_>) {
     match p.current() {
-        STAR | DEFAULT_KW => {
+        STAR | DEFAULT_KW | ON_KW | OFF_KW => {
             p.bump_any();
         }
         L_PAREN => {
             copy_option_list(p);
         }
-        ON_KW => {}
         _ => {
             if p.at_ts(NON_RESERVED_WORD) {
                 p.bump_any();
@@ -11762,6 +11788,7 @@ fn copy_option_arg(p: &mut Parser<'_>) {
 }
 
 fn copy_option_list(p: &mut Parser<'_>) {
+    let m = p.start();
     delimited(
         p,
         L_PAREN,
@@ -11771,6 +11798,7 @@
         COL_LABEL_FIRST,
         opt_copy_option,
     );
+    m.complete(p, COPY_OPTION_LIST);
 }
 
 fn opt_copy_option_item(p: &mut Parser<'_>) -> bool {
@@ -11888,7 +11916,7 @@ fn copy(p: &mut Parser<'_>) -> CompletedMarker {
     }
     if p.eat(FROM_KW) {
         // STDIN
-        if p.eat(STDIN_KW) {
+        if p.eat(STDIN_KW) || p.eat(STDOUT_KW) {
            // PROGRAM 'command'
         } else if p.eat(PROGRAM_KW) {
             string_literal(p);
@@ -12435,6 +12463,9 @@ fn set_expr_list_or_paren_select(p: &mut Parser<'_>) {
         } else {
             set_expr_list(p, m);
         }
+    } else {
+        p.error("expected row expression or sub-select");
+        m.abandon(p);
     }
 }
@@ -13468,6 +13499,7 @@ fn config_value(p: &mut Parser<'_>) -> bool {
             && opt_numeric_literal(p).is_none()
             && opt_name_ref(p).is_none()
             && !opt_bool_literal(p)
+            && !p.eat(NULL_KW)
         {
             break;
         }
@@ -13600,6 +13632,12 @@ fn opt_relation_name(p: &mut Parser<'_>) -> Option<CompletedMarker> {
 //     ATTACH PARTITION partition_name { FOR VALUES partition_bound_spec | DEFAULT }
 // ALTER TABLE [ IF EXISTS ] name
 //     DETACH PARTITION partition_name [ CONCURRENTLY | FINALIZE ]
+// ALTER TABLE [ IF EXISTS ] name
+//     MERGE PARTITIONS (partition_name1, partition_name2 [, ...]) INTO partition_name
+// ALTER TABLE [ IF EXISTS ] name
+//     SPLIT PARTITION partition_name INTO
+//       (PARTITION partition_name1 { FOR VALUES partition_bound_spec | DEFAULT },
+//        PARTITION partition_name2 { FOR VALUES partition_bound_spec | DEFAULT } [, ...])
 //
 // ALTER TABLE [ IF EXISTS ] [ ONLY ] name [ * ]
 //     action [, ... ]
@@ -13692,6 +13730,8 @@ const ALTER_TABLE_ACTION_FIRST: TokenSet = TokenSet::new(&[
     CLUSTER_KW,
     OWNER_KW,
     DETACH_KW,
+    MERGE_KW,
+    SPLIT_KW,
     DROP_KW,
     ADD_KW,
     ATTACH_KW,
@@ -13889,6 +13929,26 @@ fn opt_alter_table_action(p: &mut Parser<'_>) -> Option<CompletedMarker> {
             }
             m.complete(p, DETACH_PARTITION)
         }
+        MERGE_KW => {
+            let m = p.start();
+            p.bump(MERGE_KW);
+            p.expect(PARTITIONS_KW);
+            p.expect(L_PAREN);
+            path_name_ref_list(p);
+            p.expect(R_PAREN);
+            p.eat(INTO_KW);
+            path_name(p);
+            m.complete(p, MERGE_PARTITIONS)
+        }
+        SPLIT_KW => {
+            let m = p.start();
+            p.bump(SPLIT_KW);
+            p.expect(PARTITION_KW);
+            path_name_ref(p);
+            p.expect(INTO_KW);
+            partition_list(p);
+            m.complete(p, SPLIT_PARTITION)
+        }
         // DROP [ COLUMN ] [ IF EXISTS ] column_name [ RESTRICT | CASCADE ]
         // DROP CONSTRAINT [ IF EXISTS ] constraint_name [ RESTRICT | CASCADE ]
         DROP_KW => {
@@ -14079,6 +14139,31 @@ fn opt_alter_table_action(p: &mut Parser<'_>) -> Option<CompletedMarker> {
     Some(cm)
 }
 
+fn partition_list(p: &mut Parser<'_>) {
+    let m = p.start();
+    fn opt_partition(p: &mut Parser<'_>) -> bool {
+        let m = p.start();
+        if !p.eat(PARTITION_KW) {
+            m.abandon(p);
+            return false;
+        }
+        path_name_ref(p);
+        partition_option(p);
+        m.complete(p, PARTITION);
+        true
+    }
+    delimited(
+        p,
+        L_PAREN,
+        R_PAREN,
+        COMMA,
+        || "unexpected comma".to_string(),
+        TokenSet::new(&[PARTITION_KW]),
+        opt_partition,
+    );
+    m.complete(p, PARTITION_LIST);
+}
+
 // /* Column label --- allowed labels in "AS" clauses.
 //  * This presently includes *all* Postgres keywords.
 //  */
diff --git a/crates/squawk_parser/tests/data/ok/select_casts.sql b/crates/squawk_parser/tests/data/ok/select_casts.sql
index 7f19e4c7..19ba6e75 100644
--- a/crates/squawk_parser/tests/data/ok/select_casts.sql
+++ b/crates/squawk_parser/tests/data/ok/select_casts.sql
@@ -17,6 +17,10 @@ select '{}' :: int[] :: int8[] :: numeric[1];
 
 select '{}'::int[]::int8[]::numeric[1];
 
+-- setof
+select cast('1' as setof int);
+select '1'::setof int;
+
 -- based on postgres' gram.y
 
 -- Bit
diff --git a/crates/squawk_parser/tests/data/regression_suite/advisory_lock.sql b/crates/squawk_parser/tests/data/regression_suite/advisory_lock.sql
index fb3709e1..ac0ad503 100644
--- a/crates/squawk_parser/tests/data/regression_suite/advisory_lock.sql
+++ b/crates/squawk_parser/tests/data/regression_suite/advisory_lock.sql
@@ -2,7 +2,7 @@
 -- ADVISORY LOCKS
 --
 
-SELECT oid AS datoid FROM pg_database WHERE datname = current_database() ;
+SELECT oid AS datoid FROM pg_database WHERE datname = current_database() /* \gset */;
 
 BEGIN;
 
diff --git a/crates/squawk_parser/tests/data/regression_suite/aggregates.sql b/crates/squawk_parser/tests/data/regression_suite/aggregates.sql
index 77c75b20..343eefcc 100644
--- a/crates/squawk_parser/tests/data/regression_suite/aggregates.sql
+++ b/crates/squawk_parser/tests/data/regression_suite/aggregates.sql
@@ -3,6 +3,7 @@
 --
 
 -- directory paths are passed to us in environment variables
+-- \getenv abs_srcdir PG_ABS_SRCDIR
 
 -- avoid bit-exact output here because operations may not be bit-exact.
 SET extra_float_digits = 0;
@@ -13,6 +14,7 @@ CREATE TABLE aggtest (
   b float4
 );
 
+-- \set filename :abs_srcdir '/data/agg.data'
 COPY aggtest FROM 'filename';
 
 ANALYZE aggtest;
@@ -138,6 +140,24 @@ SELECT covar_pop(1::float8,2::float8), covar_samp(3::float8,4::float8);
 SELECT covar_pop(1::float8,'inf'::float8), covar_samp(3::float8,'inf'::float8);
 SELECT covar_pop(1::float8,'nan'::float8), covar_samp(3::float8,'nan'::float8);
 
+-- check some cases that formerly had poor roundoff-error behavior
+SELECT corr(0.09, g), regr_r2(0.09, g)
+  FROM generate_series(1, 30) g;
+SELECT corr(g, 0.09), regr_r2(g, 0.09), regr_slope(g, 0.09), regr_intercept(g, 0.09)
+  FROM generate_series(1, 30) g;
+SELECT corr(1.3 + g * 1e-16, 1.3 + g * 1e-16)
+  FROM generate_series(1, 3) g;
+SELECT corr(1e-100 + g * 1e-105, 1e-100 + g * 1e-105)
+  FROM generate_series(1, 3) g;
+SELECT corr(1e-100 + g * 1e-105, 1e-100 + g * 1e-105)
+  FROM generate_series(1, 30) g;
+
+-- these examples pose definitional questions for NaN inputs,
+-- which we resolve by saying that an all-NaN input column is not all equal
+SELECT corr(g, 'NaN') FROM generate_series(1, 30) g;
+SELECT corr(0.1, 'NaN') FROM generate_series(1, 30) g;
+SELECT corr('NaN', 'NaN') FROM generate_series(1, 30) g;
+
 -- test accum and combine functions directly
 CREATE TABLE regr_test (x float8, y float8);
 INSERT INTO regr_test VALUES (10,150),(20,250),(30,350),(80,540),(100,200);
@@ -146,7 +166,7 @@ FROM regr_test WHERE x IN (10,20,30,80);
 SELECT count(*), sum(x), regr_sxx(y,x), sum(y),regr_syy(y,x), regr_sxy(y,x)
 FROM regr_test;
 SELECT float8_accum('{4,140,2900}'::float8[], 100);
-SELECT float8_regr_accum('{4,140,2900,1290,83075,15050}'::float8[], 200, 100);
+SELECT float8_regr_accum('{4,140,2900,1290,83075,15050,100,0}'::float8[], 200, 100);
 SELECT count(*), sum(x), regr_sxx(y,x), sum(y),regr_syy(y,x), regr_sxy(y,x)
 FROM regr_test WHERE x IN (10,20,30);
 SELECT count(*), sum(x), regr_sxx(y,x), sum(y),regr_syy(y,x), regr_sxy(y,x)
@@ -154,12 +174,12 @@ FROM regr_test WHERE x IN (80,100);
 SELECT float8_combine('{3,60,200}'::float8[], '{0,0,0}'::float8[]);
 SELECT float8_combine('{0,0,0}'::float8[], '{2,180,200}'::float8[]);
 SELECT float8_combine('{3,60,200}'::float8[], '{2,180,200}'::float8[]);
-SELECT float8_regr_combine('{3,60,200,750,20000,2000}'::float8[],
-                           '{0,0,0,0,0,0}'::float8[]);
-SELECT float8_regr_combine('{0,0,0,0,0,0}'::float8[],
-                           '{2,180,200,740,57800,-3400}'::float8[]);
-SELECT float8_regr_combine('{3,60,200,750,20000,2000}'::float8[],
-                           '{2,180,200,740,57800,-3400}'::float8[]);
+SELECT float8_regr_combine('{3,60,200,750,20000,2000,1,NaN}'::float8[],
+                           '{0,0,0,0,0,0,0,0}'::float8[]);
+SELECT float8_regr_combine('{0,0,0,0,0,0,0,0}'::float8[],
+                           '{2,180,200,740,57800,-3400,NaN,1}'::float8[]);
+SELECT float8_regr_combine('{3,60,200,750,20000,2000,7,8}'::float8[],
+                           '{2,180,200,740,57800,-3400,7,9}'::float8[]);
 DROP TABLE regr_test;
 
 -- test count, distinct
@@ -180,6 +200,11 @@ SELECT newcnt(*) AS cnt_1000 FROM onek;
 SELECT oldcnt(*) AS cnt_1000 FROM onek;
 SELECT sum2(q1,q2) FROM int8_tbl;
 
+-- sanity checks
+SELECT sum(q1+q2), sum(q1)+sum(q2) FROM int8_tbl;
+SELECT sum(q1-q2), sum(q2-q1), sum(q1)-sum(q2) FROM int8_tbl;
+SELECT sum(q1*2000), sum(-q1*2000), 2000*sum(q1) FROM int8_tbl;
+
 -- test for outer-level aggregates
 
 -- this should work
@@ -240,6 +265,11 @@ SELECT
   BIT_XOR(i8) AS "?"
 FROM bitwise_test;
 
+COPY bitwise_test FROM STDIN NULL 'null';
+-- 1 1 1 1 1 B0101
+-- 3 3 3 null 2 B0100
+-- 7 7 7 3 4 B1100
+-- \.
 
 SELECT
   BIT_AND(i2) AS "1",
@@ -309,6 +339,11 @@ SELECT
   BOOL_OR(b3) AS "n"
 FROM bool_test;
 
+COPY bool_test FROM STDIN NULL 'null';
+-- TRUE null FALSE null
+-- FALSE TRUE null null
+-- null TRUE FALSE null
+-- \.
 
 SELECT
   BOOL_AND(b1) AS "f",
@@ -399,11 +434,16 @@ explain (costs off)
   select max(unique2), generate_series(1,3) as g from tenk1 order by g desc;
 select max(unique2), generate_series(1,3) as g from tenk1 order by g desc;
 
--- interesting corner case: constant gets optimized into a seqscan
+-- two interesting corner cases: both non-null and null constant gets
+-- optimized into a seqscan
 explain (costs off)
   select max(100) from tenk1;
 select max(100) from tenk1;
 
+explain (costs off)
+  select max(null) from tenk1;
+select max(null) from tenk1;
+
 -- try it on an inheritance tree
 create table minmaxtest(f1 int);
 create table minmaxtest1() inherits (minmaxtest);
@@ -532,6 +572,60 @@ drop table t2;
 drop table t3;
 drop table p_t1;
 
+--
+-- Test GROUP BY ALL
+--
+-- We don't care about the data here, just the proper transformation of the
+-- GROUP BY clause, so test some queries and verify the EXPLAIN plans.
+--
+
+CREATE TEMP TABLE t1 (
+    a int,
+    b int,
+    c int
+);
+
+-- basic example
+EXPLAIN (COSTS OFF) SELECT b, COUNT(*) FROM t1 GROUP BY ALL;
+
+-- multiple columns, non-consecutive order
+EXPLAIN (COSTS OFF) SELECT a, SUM(b), b FROM t1 GROUP BY ALL;
+
+-- multi columns, no aggregate
+EXPLAIN (COSTS OFF) SELECT a + b FROM t1 GROUP BY ALL;
+
+-- check we detect a non-top-level aggregate
+EXPLAIN (COSTS OFF) SELECT a, SUM(b) + 4 FROM t1 GROUP BY ALL;
+
+-- including grouped column is okay
+EXPLAIN (COSTS OFF) SELECT a, SUM(b) + a FROM t1 GROUP BY ALL;
+
+-- including non-grouped column, not so much
+EXPLAIN (COSTS OFF) SELECT a, SUM(b) + c FROM t1 GROUP BY ALL;
+
+-- all aggregates, should reduce to GROUP BY ()
+EXPLAIN (COSTS OFF) SELECT COUNT(a), SUM(b) FROM t1 GROUP BY ALL;
+
+-- likewise with empty target list
+EXPLAIN (COSTS OFF) SELECT FROM t1 GROUP BY ALL;
+
+-- window functions are not to be included in GROUP BY, either
+EXPLAIN (COSTS OFF) SELECT a, COUNT(a) OVER (PARTITION BY a) FROM t1 GROUP BY ALL;
+
+-- all cols
+EXPLAIN (COSTS OFF) SELECT *, count(*) FROM t1 GROUP BY ALL;
+
+-- group by all with grouping element(s) (equivalent to GROUP BY's
+-- default behavior, explicit antithesis to GROUP BY DISTINCT)
+EXPLAIN (COSTS OFF) SELECT a, count(*) FROM t1 GROUP BY ALL a;
+
+-- verify deparsing of GROUP BY ALL
+CREATE TEMP VIEW v1 AS SELECT b, COUNT(*) FROM t1 GROUP BY ALL;
+SELECT pg_get_viewdef('v1'::regclass);
+
+DROP VIEW v1;
+DROP TABLE t1;
+
 --
 -- Test GROUP BY matching of join columns that are type-coerced due to USING
 --
@@ -1037,6 +1131,43 @@ select cleast_agg(4.5,f1) from int4_tbl;
 select cleast_agg(variadic array[4.5,f1]) from int4_tbl;
 select pg_typeof(cleast_agg(variadic array[4.5,f1])) from int4_tbl;
 
+--
+-- Test SupportRequestSimplifyAggref code
+--
+begin;
+create table agg_simplify (a int, not_null_col int not null, nullable_col int);
+
+-- Ensure count(not_null_col) uses count(*)
+explain (costs off, verbose)
+select count(not_null_col) from agg_simplify;
+
+-- Ensure count() uses count(*)
+explain (costs off, verbose)
+select count('bananas') from agg_simplify;
+
+-- Ensure count(null) isn't optimized
+explain (costs off, verbose)
+select count(null) from agg_simplify;
+
+-- Ensure count(nullable_col) does not use count(*)
+explain (costs off, verbose)
+select count(nullable_col) from agg_simplify;
+
+-- Ensure there's no optimization with DISTINCT aggs
+explain (costs off, verbose)
+select count(distinct not_null_col) from agg_simplify;
+
+-- Ensure there's no optimization with ORDER BY aggs
+explain (costs off, verbose)
+select count(not_null_col order by not_null_col) from agg_simplify;
+
+-- Ensure we don't optimize to count(*) with agglevelsup > 0
+explain (costs off, verbose)
+select a from agg_simplify a group by a
+having exists (select 1 from onek b where count(a.not_null_col) = b.four);
+
+rollback;
+
 -- test aggregates with common transition functions share the same states
 begin work;
 
@@ -1493,15 +1624,6 @@ select v||'a', case when v||'a' = 'aa' then 1 else 0 end, count(*)
   from unnest(array['a','b']) u(v)
  group by v||'a' order by 1;
 
--- Make sure that generation of HashAggregate for uniqification purposes
--- does not lead to array overflow due to unexpected duplicate hash keys
--- see CAFeeJoKKu0u+A_A9R9316djW-YW3-+Gtgvy3ju655qRHR3jtdA@mail.gmail.com
-set enable_memoize to off;
-explain (costs off)
-  select 1 from tenk1
-  where (hundred, thousand) in (select twothousand, twothousand from onek);
-reset enable_memoize;
-
 --
 -- Hash Aggregation Spill tests
 --
diff --git a/crates/squawk_parser/tests/data/regression_suite/alter_generic.sql b/crates/squawk_parser/tests/data/regression_suite/alter_generic.sql
index c5346327..f4efd49b 100644
--- a/crates/squawk_parser/tests/data/regression_suite/alter_generic.sql
+++ b/crates/squawk_parser/tests/data/regression_suite/alter_generic.sql
@@ -3,7 +3,10 @@
 --
 
 -- directory paths and dlsuffix are passed to us in environment variables
+-- \getenv libdir PG_LIBDIR
+-- \getenv dlsuffix PG_DLSUFFIX
+-- \set regresslib :libdir '/regress' :dlsuffix
 
 CREATE FUNCTION test_opclass_options_func(internal)
 RETURNS void
diff --git a/crates/squawk_parser/tests/data/regression_suite/alter_table.sql b/crates/squawk_parser/tests/data/regression_suite/alter_table.sql
index a269a28d..becb32db 100644
--- a/crates/squawk_parser/tests/data/regression_suite/alter_table.sql
+++ b/crates/squawk_parser/tests/data/regression_suite/alter_table.sql
@@ -148,6 +148,7 @@ ALTER INDEX attmp_idx ALTER COLUMN 1 SET STATISTICS 1000;
 
 ALTER INDEX attmp_idx ALTER COLUMN 2 SET STATISTICS 1000;
 
+-- \d+ attmp_idx
 
 ALTER INDEX attmp_idx ALTER COLUMN 3 SET STATISTICS 1000;
 
@@ -297,15 +298,23 @@ ALTER TABLE onek DROP CONSTRAINT onek_unique1_constraint_foo;
 
 -- renaming constraints vs. inheritance
 CREATE TABLE constraint_rename_test (a int CONSTRAINT con1 CHECK (a > 0), b int, c int);
+-- \d constraint_rename_test
 CREATE TABLE constraint_rename_test2 (a int CONSTRAINT con1 CHECK (a > 0), d int) INHERITS (constraint_rename_test);
+-- \d constraint_rename_test2
 ALTER TABLE constraint_rename_test2 RENAME CONSTRAINT con1 TO con1foo; -- fail
 ALTER TABLE ONLY constraint_rename_test RENAME CONSTRAINT con1 TO con1foo; -- fail
 ALTER TABLE constraint_rename_test RENAME CONSTRAINT con1 TO con1foo; -- ok
+-- \d constraint_rename_test
+-- \d constraint_rename_test2
 ALTER TABLE constraint_rename_test ADD CONSTRAINT con2 CHECK (b > 0) NO INHERIT;
 ALTER TABLE ONLY constraint_rename_test RENAME CONSTRAINT con2 TO con2foo; -- ok
 ALTER TABLE constraint_rename_test RENAME CONSTRAINT con2foo TO con2bar; -- ok
+-- \d constraint_rename_test
+-- \d constraint_rename_test2
 ALTER TABLE constraint_rename_test ADD CONSTRAINT con3 PRIMARY KEY (a);
 ALTER TABLE constraint_rename_test RENAME CONSTRAINT con3 TO con3foo; -- ok
+-- \d constraint_rename_test
+-- \d constraint_rename_test2
 DROP TABLE constraint_rename_test2;
 DROP TABLE constraint_rename_test;
 ALTER TABLE IF EXISTS constraint_not_exist RENAME CONSTRAINT con3 TO con3foo; -- ok
@@ -321,6 +330,7 @@ ALTER TABLE constraint_rename_cache
 RENAME CONSTRAINT constraint_rename_cache_pkey TO constraint_rename_pkey_new;
 CREATE TABLE like_constraint_rename_cache
   (LIKE constraint_rename_cache INCLUDING ALL);
+-- \d like_constraint_rename_cache
 
 DROP TABLE constraint_rename_cache;
 DROP TABLE like_constraint_rename_cache;
@@ -439,6 +449,7 @@ DROP TABLE attmp2;
 set constraint_exclusion TO 'partition';
 create table nv_parent (d date, check (false) no inherit not valid);
 -- not valid constraint added at creation time should automatically become valid
+-- \d nv_parent
 
 create table nv_child_2010 () inherits (nv_parent);
 create table nv_child_2011 () inherits (nv_parent);
@@ -454,6 +465,7 @@ explain (costs off) select * from nv_parent where d between '2009-08-01'::date a
 
 -- add an inherited NOT VALID constraint
 alter table nv_parent add check (d between '2001-01-01'::date and '2099-12-31'::date) not valid;
+-- \d nv_child_2009
 
 -- we leave nv_parent and children around to help test pg_dump logic
 
 -- Foreign key adding test with mixed types
@@ -840,9 +852,12 @@ alter table non_existent alter column bar drop not null;
 -- test checking for null values and primary key
 create table atacc1 (test int not null);
 alter table atacc1 add constraint "atacc1_pkey" primary key (test);
+-- \d atacc1
 alter table atacc1 alter column test drop not null;
+-- \d atacc1
 alter table atacc1 drop constraint "atacc1_pkey";
 alter table atacc1 alter column test drop not null;
+-- \d atacc1
 insert into atacc1 values (null);
 alter table atacc1 alter test set not null;
 delete from atacc1;
@@ -853,7 +868,7 @@ alter table atacc1 alter bar set not null;
 alter table atacc1 alter bar drop not null;
 
 -- try creating a view and altering that, should fail
-create view myview as select * from atacc1;
+-- create view myview as select * from atacc1;
 alter table myview alter column test drop not null;
 alter table myview alter column test set not null;
 drop view myview;
@@ -902,8 +917,10 @@ CREATE TABLE atnnpart1 (col1 int, id int);
 ALTER TABLE atnnpart1 ADD CONSTRAINT another_constr NOT NULL id;
 ALTER TABLE atnnpart1 ADD PRIMARY KEY (id);
 ALTER TABLE atnnparted ATTACH PARTITION atnnpart1 FOR VALUES IN ('1');
+-- \d+ atnnpart*
 BEGIN;
 ALTER TABLE atnnparted VALIDATE CONSTRAINT dummy_constr;
+-- \d+ atnnpart*
 ROLLBACK;
 
 -- leave a table in this state for the pg_upgrade test
@@ -1027,10 +1044,10 @@ alter table atacc1 SET WITHOUT OIDS;
 -- alter table atacc1 SET WITH OIDS;
 
 -- try dropping the xmin column, should fail
-alter table atacc1 drop xmin;
+-- alter table atacc1 drop xmin;
 
 -- try creating a view and altering that, should fail
-create view myview as select * from atacc1;
+-- create view myview as select * from atacc1;
 select * from myview;
 alter table myview drop d;
 drop view myview;
@@ -1133,14 +1150,17 @@ copy attest(a) to stdout;
 copy attest("........pg.dropped.1........") to stdout;
 copy attest from stdin;
 -- 10 11 12
+-- \.
 select * from attest;
 copy attest from stdin;
 -- 21 22
+-- \.
 select * from attest;
 copy attest(a) from stdin;
 copy attest("........pg.dropped.1........") from stdin;
 copy attest(b,c) from stdin;
 -- 31 32
+-- \.
 select * from attest;
 drop table attest;
@@ -1380,12 +1400,14 @@ alter table anothertab
 create index on anothertab(f2,f3);
 create unique index on anothertab(f4);
 
+-- \d anothertab
 alter table anothertab alter column f1 type bigint;
 alter table anothertab
   alter column f2 type bigint,
  alter column f3 type bigint,
  alter column f4 type bigint;
 alter table anothertab alter column f5 type bigint;
+-- \d anothertab
 
 drop table anothertab;
@@ -1438,8 +1460,13 @@ create table at_part_2 (b text, a int);
 insert into at_part_2 values ('1.234', 1024);
 create index on at_partitioned (b);
 create index on at_partitioned (a);
+-- \d at_part_1
+-- \d at_part_2
 alter table at_partitioned attach partition at_part_2 for values from (1000) to (2000);
+-- \d at_part_2
 alter table at_partitioned alter column b type numeric using b::numeric;
+-- \d at_part_1
+-- \d at_part_2
 drop table at_partitioned;
 
 -- Alter column type when no table rewrite is required
@@ -1532,15 +1559,21 @@ create table test_storage_failed (a text, b int storage extended);
 -- test that SET STORAGE propagates to index correctly
 create index test_storage_idx on test_storage (b, a);
 alter table test_storage alter column a set storage external;
+-- \d+ test_storage
+-- \d+ test_storage_idx
 
 -- ALTER COLUMN TYPE with a check constraint and a child table (bug #13779)
 CREATE TABLE test_inh_check (a float check (a > 10.2), b float);
 CREATE TABLE test_inh_check_child() INHERITS(test_inh_check);
+-- \d test_inh_check
+-- \d test_inh_check_child
 select relname, conname, coninhcount, conislocal, connoinherit
 from pg_constraint c, pg_class r
 where relname like 'test_inh_check%' and c.conrelid = r.oid
 order by 1, 2;
 ALTER TABLE test_inh_check ALTER COLUMN a TYPE numeric;
+-- \d test_inh_check
+-- \d test_inh_check_child
 select relname, conname, coninhcount, conislocal, connoinherit
 from pg_constraint c, pg_class r
 where relname like 'test_inh_check%' and c.conrelid = r.oid
@@ -1550,11 +1583,15 @@ ALTER TABLE test_inh_check ADD CONSTRAINT bnoinherit CHECK (b > 100) NO INHERIT;
 ALTER TABLE test_inh_check_child ADD CONSTRAINT blocal CHECK (b < 1000);
 ALTER TABLE test_inh_check_child ADD CONSTRAINT bmerged CHECK (b > 1);
 ALTER TABLE test_inh_check ADD CONSTRAINT bmerged CHECK (b > 1);
+-- \d test_inh_check
+-- \d test_inh_check_child
 select relname, conname, coninhcount, conislocal, connoinherit
 from pg_constraint c, pg_class r
 where relname like 'test_inh_check%' and c.conrelid = r.oid
 order by 1, 2;
 ALTER TABLE test_inh_check ALTER COLUMN b TYPE numeric;
+-- \d test_inh_check
+-- \d test_inh_check_child
 select relname, conname, coninhcount, conislocal, connoinherit
 from pg_constraint c, pg_class r
 where relname like 'test_inh_check%' and c.conrelid = r.oid
@@ -1589,6 +1626,7 @@ BEGIN;
 ALTER TABLE check_fk_presence_2 DROP CONSTRAINT check_fk_presence_2_id_fkey;
 ANALYZE check_fk_presence_2;
 ROLLBACK;
+-- \d check_fk_presence_2
 DROP TABLE check_fk_presence_1, check_fk_presence_2;
 
 -- check column addition within a view (bug #14876)
@@ -1596,10 +1634,14 @@ create table at_base_table(id int, stuff text);
 insert into at_base_table values (23, 'skidoo');
 create view at_view_1 as select * from at_base_table bt;
 create view at_view_2 as select *, to_json(v1) as j from at_view_1 v1;
+-- \d+ at_view_1
+-- \d+ at_view_2
 explain (verbose, costs off) select * from at_view_2;
 select * from at_view_2;
 
 create or replace view at_view_1 as select *, 2+2 as more from at_base_table bt;
+-- \d+ at_view_1
+-- \d+ at_view_2
 explain (verbose, costs off) select * from at_view_2;
 select * from at_view_2;
@@ -1922,27 +1964,34 @@ drop schema alter2 cascade;
 --
 CREATE TYPE test_type AS (a int);
+-- \d test_type
 
 ALTER TYPE nosuchtype ADD ATTRIBUTE b text; -- fails
 
 ALTER TYPE test_type ADD ATTRIBUTE b text;
+-- \d test_type
 
 ALTER TYPE test_type ADD ATTRIBUTE b text; -- fails
 
 ALTER TYPE test_type ALTER ATTRIBUTE b SET DATA TYPE varchar;
+-- \d test_type
 
 ALTER TYPE test_type ALTER ATTRIBUTE b SET DATA TYPE integer;
+-- \d test_type
 
 ALTER TYPE test_type DROP ATTRIBUTE b;
+-- \d test_type
 
 ALTER TYPE test_type DROP ATTRIBUTE c; -- fails
 
 ALTER TYPE test_type DROP ATTRIBUTE IF EXISTS c;
 
 ALTER TYPE test_type DROP ATTRIBUTE a, ADD ATTRIBUTE d boolean;
+-- \d test_type
 
 ALTER TYPE test_type RENAME ATTRIBUTE a TO aa;
 ALTER TYPE test_type RENAME ATTRIBUTE d TO dd;
+-- \d test_type
 
 DROP TYPE test_type;
@@ -1961,18 +2010,29 @@ DROP TYPE test_type1;
 CREATE TYPE test_type2 AS (a int, b text);
 CREATE TABLE test_tbl2 OF test_type2;
 CREATE TABLE test_tbl2_subclass () INHERITS (test_tbl2);
+-- \d test_type2
+-- \d test_tbl2
 
 ALTER TYPE test_type2 ADD ATTRIBUTE c text; -- fails
 ALTER TYPE test_type2 ADD ATTRIBUTE c text CASCADE;
+-- \d test_type2
+-- \d test_tbl2
 
 ALTER TYPE test_type2 ALTER ATTRIBUTE b TYPE varchar; -- fails
 ALTER TYPE test_type2 ALTER ATTRIBUTE b TYPE varchar CASCADE;
+-- \d test_type2
+-- \d test_tbl2
 
 ALTER TYPE test_type2 DROP ATTRIBUTE b; -- fails
 ALTER TYPE test_type2 DROP ATTRIBUTE b CASCADE;
+-- \d test_type2
+-- \d test_tbl2
 
 ALTER TYPE test_type2 RENAME ATTRIBUTE a TO aa; -- fails
 ALTER TYPE test_type2 RENAME ATTRIBUTE a TO aa CASCADE;
+-- \d test_type2
+-- \d test_tbl2
+-- \d test_tbl2_subclass
 
 DROP TABLE test_tbl2_subclass, test_tbl2;
 DROP TYPE test_type2;
@@ -1981,6 +2041,7 @@ CREATE TYPE test_typex AS (a int, b text);
 CREATE TABLE test_tblx (x int, y test_typex check ((y).a > 0));
 ALTER TYPE test_typex DROP ATTRIBUTE a; -- fails
 ALTER TYPE test_typex DROP ATTRIBUTE a CASCADE;
+-- \d test_tblx
 DROP TABLE test_tblx;
 DROP TYPE test_typex;
@@ -2022,6 +2083,7 @@ ALTER TABLE tt7 OF tt_t0;
 CREATE TYPE tt_t1 AS (x int, y numeric(8,2));
 ALTER TABLE tt7 OF tt_t1; -- reassign an already-typed table
 ALTER TABLE tt7 NOT OF;
+-- \d tt7
 
 -- make sure we can drop a constraint on the parent but it remains on the child
 CREATE TABLE test_drop_constr_parent (c text CHECK (c IS NOT NULL));
@@ -2051,6 +2113,7 @@ ALTER TABLE IF EXISTS tt8 ALTER COLUMN f SET DEFAULT 0;
 ALTER TABLE IF EXISTS tt8 RENAME COLUMN f TO f1;
 ALTER TABLE IF EXISTS tt8 SET SCHEMA alter2;
 
+-- \d alter2.tt8
 DROP TABLE alter2.tt8;
 DROP SCHEMA alter2;
@@ -2070,6 +2133,7 @@ ALTER TABLE tt9 ADD CONSTRAINT foo UNIQUE(c); -- fail, dup name
 ALTER TABLE tt9 ADD CONSTRAINT tt9_c_key CHECK(c > 5); -- fail, dup name
 ALTER TABLE tt9 ADD CONSTRAINT tt9_c_key2 CHECK(c > 6);
 ALTER TABLE tt9 ADD UNIQUE(c); -- picks nonconflicting name
+-- \d tt9
 
 DROP TABLE tt9;
@@ -2138,13 +2202,15 @@ SELECT conname as constraint, obj_description(oid, 'pg_constraint') as comment F
 -- filenode function call can return NULL for a relation dropped concurrently
 -- with the call's surrounding query, so ignore a NULL mapped_oid for
 -- relations that no longer exist after all calls finish.
+-- Temporary relations are ignored, as not supported by pg_filenode_relation().
 CREATE TEMP TABLE filenode_mapping AS
 SELECT
     oid, mapped_oid, reltablespace, relfilenode, relname
 FROM pg_class,
     pg_filenode_relation(reltablespace, pg_relation_filenode(oid)) AS mapped_oid
-WHERE relkind IN ('r', 'i', 'S', 't', 'm') AND mapped_oid IS DISTINCT FROM oid;
-
+WHERE relkind IN ('r', 'i', 'S', 't', 'm')
+  AND relpersistence != 't'
+  AND mapped_oid IS DISTINCT FROM oid;
 SELECT m.* FROM filenode_mapping m LEFT JOIN pg_class c ON c.oid = m.oid
 WHERE c.oid IS NOT NULL OR m.mapped_oid IS NOT NULL;
@@ -2226,36 +2292,48 @@ DROP TABLE logged1;
 
 -- test ADD COLUMN IF NOT EXISTS
 CREATE TABLE test_add_column(c1 integer);
+-- \d test_add_column
 ALTER TABLE test_add_column
   ADD COLUMN c2 integer;
+-- \d test_add_column
 ALTER TABLE test_add_column
   ADD COLUMN c2 integer; -- fail because c2 already exists
 ALTER TABLE ONLY test_add_column
   ADD COLUMN c2 integer; -- fail because c2 already exists
+-- \d test_add_column
 ALTER TABLE test_add_column
  ADD COLUMN IF NOT EXISTS c2 integer; -- skipping because c2 already exists
 ALTER TABLE ONLY test_add_column
  ADD COLUMN IF NOT EXISTS c2 integer; -- skipping because c2 already exists
+-- \d test_add_column
 ALTER TABLE test_add_column
  ADD COLUMN c2 integer, -- fail because c2 already exists
  ADD COLUMN c3 integer primary key;
+-- \d test_add_column
 ALTER TABLE test_add_column
  ADD COLUMN IF NOT EXISTS c2 integer, -- skipping because c2 already exists
  ADD COLUMN c3 integer primary key;
+-- \d test_add_column
 ALTER TABLE test_add_column
  ADD COLUMN IF NOT EXISTS c2 integer, -- skipping because c2 already exists
 ADD COLUMN IF NOT EXISTS c3 integer primary key; -- skipping because c3 already exists
+-- \d test_add_column
 ALTER TABLE test_add_column
 ADD COLUMN IF NOT EXISTS c2 integer, -- skipping because c2 already exists
 ADD COLUMN IF NOT EXISTS c3 integer, -- skipping because c3 already exists
 ADD COLUMN c4 integer REFERENCES test_add_column;
+-- \d test_add_column
 ALTER TABLE test_add_column
 ADD COLUMN IF NOT EXISTS c4 integer REFERENCES test_add_column;
+-- \d test_add_column
 ALTER TABLE test_add_column
 ADD COLUMN IF NOT EXISTS c5 SERIAL CHECK (c5 > 8);
+-- \d test_add_column
 ALTER TABLE test_add_column
 ADD COLUMN IF NOT EXISTS c5 SERIAL CHECK (c5 > 10);
+-- \d test_add_column*
 DROP TABLE test_add_column;
+-- \d test_add_column*
 
 -- assorted cases with multiple ALTER TABLE steps
 CREATE TABLE ataddindex(f1 INT);
@@ -2264,6 +2342,7 @@ CREATE UNIQUE INDEX ataddindexi0 ON ataddindex(f1);
 ALTER TABLE ataddindex
   ADD PRIMARY KEY USING INDEX ataddindexi0,
   ALTER f1 TYPE BIGINT;
+-- \d ataddindex
 DROP TABLE ataddindex;
 
 CREATE TABLE ataddindex(f1 VARCHAR(10));
@@ -2271,18 +2350,21 @@ INSERT INTO ataddindex(f1) VALUES ('foo'), ('a');
 ALTER TABLE ataddindex
   ALTER f1 SET DATA TYPE TEXT,
   ADD EXCLUDE ((f1 LIKE 'a') WITH =);
+-- \d ataddindex
 DROP TABLE ataddindex;
 
 CREATE TABLE ataddindex(id int, ref_id int);
 ALTER TABLE ataddindex
   ADD PRIMARY KEY (id),
  ADD FOREIGN KEY (ref_id) REFERENCES ataddindex;
+-- \d ataddindex
 DROP TABLE ataddindex;
 
 CREATE TABLE ataddindex(id int, ref_id int);
 ALTER TABLE ataddindex
  ADD UNIQUE (id),
  ADD FOREIGN KEY (ref_id) REFERENCES ataddindex (id);
+-- \d ataddindex
 DROP TABLE ataddindex;
 
 CREATE TABLE atnotnull1 ();
@@ -2295,6 +2377,7 @@ ALTER TABLE atnotnull1
 ALTER TABLE atnotnull1
   ADD COLUMN c INT,
  ADD PRIMARY KEY (c);
+-- \d+ atnotnull1
 
 -- cannot drop column that is part of the partition key
 CREATE TABLE partitioned (
@@ -2742,12 +2825,18 @@ ALTER TABLE range_parted2 DETACH PARTITION part_rpd CONCURRENTLY;
 DROP TABLE part_rpd;
 -- works fine
 ALTER TABLE range_parted2 DETACH PARTITION part_rp CONCURRENTLY;
--- constraint should be created
-CREATE TABLE part_rp100 PARTITION OF range_parted2 (CHECK (a>=123 AND a<133 AND a IS NOT NULL)) FOR VALUES FROM (100) to (200);
-ALTER TABLE range_parted2 DETACH PARTITION part_rp100 CONCURRENTLY;
--- redundant constraint should not be created
+-- \d+ range_parted2
 DROP TABLE range_parted2;
 
+-- Test that hash partitions continue to work after they're concurrently
+-- detached (bugs #18371, #19070)
+CREATE TABLE hash_parted2 (a int) PARTITION BY HASH(a);
+CREATE TABLE part_hp PARTITION OF hash_parted2 FOR VALUES WITH (MODULUS 2, REMAINDER 0);
+ALTER TABLE hash_parted2 DETACH PARTITION part_hp CONCURRENTLY;
+DROP TABLE hash_parted2;
+INSERT INTO part_hp VALUES (1);
+DROP TABLE part_hp;
+
 -- Check ALTER TABLE commands for partitioned tables and partitions
 
 -- cannot add/drop column to/from *only* the parent
@@ -2773,6 +2862,7 @@ ALTER TABLE ONLY list_parted2 ALTER b DROP NOT NULL;
 ALTER TABLE list_parted2 ADD CONSTRAINT check_b CHECK (b <> 'zz');
 ALTER TABLE ONLY list_parted2 DROP CONSTRAINT check_b;
 -- ... and the partitions should still have both
+-- \d+ part_2
 
 -- It's alright though, if no partitions are yet created
 CREATE TABLE parted_no_parts (a int) PARTITION BY LIST (a);
@@ -2984,6 +3074,23 @@ drop table attbl, atref;
 
 /* End test case for bug #17409 */
 
+/* Test case for bug #18970 */
+
+create table attbl(a int);
+create table atref(b attbl check ((b).a is not null));
+alter table attbl alter column a type numeric; -- someday this should work
+alter table atref drop constraint atref_b_check;
+
+create statistics atref_stat on ((b).a is not null) from atref;
+alter table attbl alter column a type numeric; -- someday this should work
+drop statistics atref_stat;
+
+create index atref_idx on atref (((b).a));
+alter table attbl alter column a type numeric; -- someday this should work
+drop table attbl, atref;
+
+/* End test case for bug #18970 */
+
 -- Test that ALTER TABLE rewrite preserves a clustered index
 -- for normal indexes and indexes on constraints.
 create table alttype_cluster (a int);
@@ -3036,6 +3143,7 @@ set client_min_messages = 'ERROR';
 create publication pub1 for table alter1.t1, tables in schema alter2;
 reset client_min_messages;
 alter table alter1.t1 set schema alter2;
+-- \d+ alter2.t1
 drop publication pub1;
 drop schema alter1 cascade;
 drop schema alter2 cascade;
diff --git a/crates/squawk_parser/tests/data/regression_suite/arrays.sql b/crates/squawk_parser/tests/data/regression_suite/arrays.sql
index 530ecea9..eff45a76 100644
--- a/crates/squawk_parser/tests/data/regression_suite/arrays.sql
+++ b/crates/squawk_parser/tests/data/regression_suite/arrays.sql
@@ -3,6 +3,7 @@
 --
 
 -- directory paths are passed to us in environment variables
+-- \getenv abs_srcdir PG_ABS_SRCDIR
 
 CREATE TABLE arrtest (
   a int2[],
@@ -20,6 +21,7 @@ CREATE TABLE array_op_test (
   t text[]
 );
 
+-- \set filename :abs_srcdir '/data/array.data'
 COPY array_op_test FROM 'filename';
 ANALYZE array_op_test;
@@ -440,6 +442,7 @@ reset enable_bitmapscan;
 
 -- The normal error message includes a platform-dependent limit,
 -- so suppress it to avoid needing multiple expected-files.
+-- \set VERBOSITY sqlstate
 
 insert into arr_pk_tbl values(10, '[-2147483648:-2147483647]={1,2}');
 update arr_pk_tbl set f1[2147483647] = 42 where pk = 10;
@@ -454,6 +457,7 @@ begin
   a[2147483647] := 42;
 end $$;
 
+-- \set VERBOSITY default
 
 -- test [not] (like|ilike) (any|all) (...)
 select 'foo' like any (array['%a', '%o']); -- t
diff --git a/crates/squawk_parser/tests/data/regression_suite/bit.sql b/crates/squawk_parser/tests/data/regression_suite/bit.sql
index 8a7ae4b0..9a719cd2 100644
--- a/crates/squawk_parser/tests/data/regression_suite/bit.sql
+++ b/crates/squawk_parser/tests/data/regression_suite/bit.sql
@@ -69,6 +69,18 @@ SELECT SUBSTRING('01010101'::varbit FROM -10 FOR -2147483646) AS "error";
 
 --- Bit operations
 DROP TABLE varbit_table;
 CREATE TABLE varbit_table (a BIT VARYING(16), b BIT VARYING(16));
+COPY varbit_table FROM stdin;
+-- X0F X10
+-- X1F X11
+-- X2F X12
+-- X3F X13
+-- X8F X04
+-- X000F X0010
+-- X0123 XFFFF
+-- X2468 X2468
+-- XFA50 X05AF
+-- X1234 XFFF5
+-- \.
 SELECT a, b, ~a AS "~ a", a & b AS "a & b",
        a | b AS "a | b", a # b AS "a # b" FROM varbit_table;
@@ -81,6 +93,18 @@ DROP TABLE varbit_table;
 
 --- Bit operations
 DROP TABLE bit_table;
 CREATE TABLE bit_table (a BIT(16), b BIT(16));
+COPY bit_table FROM stdin;
+-- X0F00 X1000
+-- X1F00 X1100
+-- X2F00 X1200
+-- X3F00 X1300
+-- X8F00 X0400
+-- X000F X0010
+-- X0123 XFFFF
+-- X2468 X2468
+-- XFA50 X05AF
+-- X1234 XFFF5
+-- \.
 SELECT a,b,~a AS "~ a",a & b AS "a & b",
        a|b AS "a | b", a # b AS "a # b" FROM bit_table;
@@ -211,6 +235,7 @@ CREATE TABLE bit_defaults(
   b3 bit varying(5) DEFAULT '1001',
   b4 bit varying(5) DEFAULT B'0101'
 );
+-- \d bit_defaults
 INSERT INTO bit_defaults DEFAULT VALUES;
 TABLE bit_defaults;
diff --git a/crates/squawk_parser/tests/data/regression_suite/boolean.sql b/crates/squawk_parser/tests/data/regression_suite/boolean.sql
index 731bc1ec..08193428 100644
--- a/crates/squawk_parser/tests/data/regression_suite/boolean.sql
+++ b/crates/squawk_parser/tests/data/regression_suite/boolean.sql
@@ -230,6 +230,7 @@ FROM booltbl3 ORDER BY o;
 -- from interfering.
 CREATE TABLE booltbl4(isfalse bool, istrue bool, isnul bool);
 INSERT INTO booltbl4 VALUES (false, true, null);
+-- \pset null '(null)'
 
 -- AND expression need to return null if there's any nulls and not all
 -- of the value are true
diff --git a/crates/squawk_parser/tests/data/regression_suite/btree_index.sql b/crates/squawk_parser/tests/data/regression_suite/btree_index.sql
index 2aed7dbf..f48ff6b9 100644
--- a/crates/squawk_parser/tests/data/regression_suite/btree_index.sql
+++ b/crates/squawk_parser/tests/data/regression_suite/btree_index.sql
@@ -3,6 +3,7 @@
 --
 
 -- directory paths are passed to us in environment variables
+-- \getenv abs_srcdir PG_ABS_SRCDIR
 
 CREATE TABLE bt_i4_heap (
   seqno int4,
@@ -24,12 +25,16 @@ CREATE TABLE bt_f8_heap (
   random int4
 );
 
+-- \set filename :abs_srcdir '/data/desc.data'
 COPY bt_i4_heap FROM 'filename';
 
+-- \set filename :abs_srcdir '/data/hash.data'
 COPY bt_name_heap FROM 'filename';
 
+-- \set filename :abs_srcdir '/data/desc.data'
 COPY bt_txt_heap FROM 'filename';
 
+-- \set filename :abs_srcdir '/data/hash.data'
 COPY bt_f8_heap FROM 'filename';
 
 ANALYZE bt_i4_heap;
@@ -138,38 +143,83 @@ SELECT proname, proargtypes, pronamespace
 ORDER BY proname DESC, proargtypes DESC, pronamespace DESC
 LIMIT 1;
 
 --
--- Add coverage for RowCompare quals whose rhs row has a NULL that ends scan
+-- Forwards scan RowCompare qual whose row arg has a NULL that affects our
+-- initial positioning strategy
 --
 explain (costs off)
 SELECT proname, proargtypes, pronamespace
   FROM pg_proc
-  WHERE proname = 'abs' AND (proname, proargtypes) < ('abs', NULL)
+  WHERE (proname, proargtypes) >= ('abs', NULL) AND proname <= 'abs'
 ORDER BY proname, proargtypes, pronamespace;
 
 SELECT proname, proargtypes, pronamespace
   FROM pg_proc
-  WHERE proname = 'abs' AND (proname, proargtypes) < ('abs', NULL)
+  WHERE (proname, proargtypes) >= ('abs', NULL) AND proname <= 'abs'
 ORDER BY proname, proargtypes, pronamespace;
 
 --
--- Add coverage for backwards scan RowCompare quals whose rhs row has a NULL
--- that ends scan
+-- Forwards scan RowCompare quals whose row arg has a NULL that ends scan
 --
 explain (costs off)
 SELECT proname, proargtypes, pronamespace
   FROM pg_proc
-  WHERE proname = 'abs' AND (proname, proargtypes) > ('abs', NULL)
+  WHERE proname >= 'abs' AND (proname, proargtypes) < ('abs', NULL)
+ORDER BY proname, proargtypes, pronamespace;
+
+SELECT proname, proargtypes, pronamespace
+  FROM pg_proc
+  WHERE proname >= 'abs' AND (proname, proargtypes) < ('abs', NULL)
+ORDER BY proname, proargtypes, pronamespace;
+
+--
+-- Backwards scan RowCompare qual whose row arg has a NULL that affects our
+-- initial positioning strategy
+--
+explain (costs off)
+SELECT proname, proargtypes, pronamespace
+  FROM pg_proc
+  WHERE proname >= 'abs' AND (proname, proargtypes) <= ('abs', NULL)
 ORDER BY proname DESC, proargtypes DESC, pronamespace DESC;
 
 SELECT proname, proargtypes, pronamespace
   FROM pg_proc
-  WHERE proname = 'abs' AND (proname, proargtypes) > ('abs', NULL)
+  WHERE proname >= 'abs' AND (proname, proargtypes) <= ('abs', NULL)
 ORDER BY proname DESC, proargtypes DESC, pronamespace DESC;
 
 --
--- Add coverage for recheck of > key following array advancement on previous
--- (left sibling) page that used a high key whose attribute value corresponding
--- to the > key was -inf (due to being truncated when the high key was created).
+-- Backwards scan RowCompare qual whose row arg has a NULL that ends scan
+--
+explain (costs off)
+SELECT proname, proargtypes, pronamespace
+  FROM pg_proc
+  WHERE (proname, proargtypes) > ('abs', NULL) AND proname <= 'abs'
+ORDER BY proname DESC, proargtypes DESC, pronamespace DESC;
+
+SELECT proname, proargtypes, pronamespace
+  FROM pg_proc
+  WHERE (proname, proargtypes) > ('abs', NULL) AND proname <= 'abs'
+ORDER BY proname DESC, proargtypes DESC, pronamespace DESC;
+
+-- Makes B-Tree preprocessing deal with unmarking redundant keys that were
+-- initially marked required (test case relies on current row compare
+-- preprocessing limitations)
+explain (costs off)
+SELECT proname, proargtypes, pronamespace
+  FROM pg_proc
+  WHERE proname = 'zzzzzz' AND (proname, proargtypes) > ('abs', NULL)
+    AND pronamespace IN (1, 2, 3) AND proargtypes IN ('26 23', '5077')
+ORDER BY proname, proargtypes, pronamespace;
+
+SELECT proname, proargtypes, pronamespace
+  FROM pg_proc
+  WHERE proname = 'zzzzzz' AND (proname, proargtypes) > ('abs', NULL)
+    AND pronamespace IN (1, 2, 3) AND proargtypes IN ('26 23', '5077')
+ORDER BY proname, proargtypes, pronamespace;
+
+--
+-- Performs a recheck of > key following array advancement on previous (left
+-- sibling) page that used a high key whose attribute value corresponding to
+-- the > key was -inf (due to being truncated when the high key was created).
 --
 -- XXX This relies on the assumption that tenk1_thous_tenthous has a truncated
 -- high key "(183, -inf)" on the first page that we'll scan.  The test will only
diff --git a/crates/squawk_parser/tests/data/regression_suite/cluster.sql b/crates/squawk_parser/tests/data/regression_suite/cluster.sql
index c3ae3443..57ab5eb5 100644
--- a/crates/squawk_parser/tests/data/regression_suite/cluster.sql
+++ b/crates/squawk_parser/tests/data/regression_suite/cluster.sql
@@ -225,6 +225,7 @@ CLUSTER clstrpart USING clstrpart_idx;
 CREATE TEMP TABLE new_cluster_info AS SELECT relname, level, relfilenode, relkind FROM pg_partition_tree('clstrpart'::regclass) AS tree JOIN pg_class c ON c.oid=tree.relid ;
 SELECT relname, old.level, old.relkind, old.relfilenode = new.relfilenode FROM old_cluster_info AS old JOIN new_cluster_info AS new USING (relname) ORDER BY relname COLLATE "C";
 -- Partitioned indexes aren't and can't be marked un/clustered:
+-- \d clstrpart
 CLUSTER clstrpart;
 ALTER TABLE clstrpart SET WITHOUT CLUSTER;
 ALTER TABLE clstrpart CLUSTER ON clstrpart_idx;
diff --git a/crates/squawk_parser/tests/data/regression_suite/collate.icu.utf8.sql b/crates/squawk_parser/tests/data/regression_suite/collate.icu.utf8.sql
index 82bef132..9268d49f 100644
--- a/crates/squawk_parser/tests/data/regression_suite/collate.icu.utf8.sql
+++ b/crates/squawk_parser/tests/data/regression_suite/collate.icu.utf8.sql
@@ -5,7 +5,10 @@
 /* skip test if not UTF8 server encoding or no ICU collations installed */
 SELECT getdatabaseencoding() <> 'UTF8' OR
        (SELECT count(*) FROM pg_collation WHERE collprovider = 'i' AND collname <> 'unicode') = 0
-       AS skip_test ;
+       AS skip_test /* \gset */;
+-- \if :skip_test
+-- \quit
+-- \endif
 
 SET client_encoding TO UTF8;
 
@@ -18,6 +21,7 @@ CREATE TABLE collate_test1 (
     b text COLLATE "en-x-icu" NOT NULL
 );
 
+-- \d collate_test1
 
 CREATE TABLE collate_test_fail (
     a int,
@@ -38,6 +42,7 @@ CREATE TABLE collate_test_like (
     LIKE collate_test1
 );
 
+-- \d collate_test_like
 
 CREATE TABLE collate_test2 (
     a int,
@@ -49,7 +54,7 @@ CREATE TABLE collate_test3 (
     b text COLLATE "C"
 );
 
-INSERT INTO collate_test1 VALUES (1, 'abc'), (2, 'äbc'), (3, 'bbc'), (4, 'ABC');
+INSERT INTO collate_test1 VALUES (1, 'abc'), (2, 'äbc'), (3, 'bbc'), (4, 'ABC');
 INSERT INTO collate_test2 SELECT * FROM collate_test1;
 INSERT INTO collate_test3 SELECT * FROM collate_test1;
@@ -93,8 +98,8 @@ SELECT * FROM collate_test2 ORDER BY b;
 SELECT * FROM collate_test3 ORDER BY b;
 
 -- constant expression folding
-SELECT 'bbc' COLLATE "en-x-icu" > 'äbc' COLLATE "en-x-icu" AS "true";
-SELECT 'bbc' COLLATE "sv-x-icu" > 'äbc' COLLATE "sv-x-icu" AS "false";
+SELECT 'bbc' COLLATE "en-x-icu" > 'äbc' COLLATE "en-x-icu" AS "true";
+SELECT 'bbc' COLLATE "sv-x-icu" > 'äbc' COLLATE "sv-x-icu" AS "false";
 
 
 -- upper/lower
@@ -111,10 +116,10 @@ SELECT a, lower(x COLLATE "C"), lower(y COLLATE "C") FROM collate_test10;
 
 SELECT a, x, y FROM collate_test10 ORDER BY lower(y), a;
 
-SELECT lower('AbCd 123 #$% ıiIİ ẞ ß DŽDždž Σσς' COLLATE "en-x-icu");
-SELECT casefold('AbCd 123 #$% ıiIİ ẞ ß DŽDždž Σσς' COLLATE "en-x-icu");
-SELECT lower('AbCd 123 #$% ıiIİ ẞ ß DŽDždž Σσς' COLLATE "tr-x-icu");
-SELECT casefold('AbCd 123 #$% ıiIİ ẞ ß DŽDždž Σσς' COLLATE "tr-x-icu");
+SELECT lower('AbCd 123 #$% ıiIİ ẞ ß DŽDždž Σσς' COLLATE "en-x-icu");
+SELECT casefold('AbCd 123 #$% ıiIİ ẞ ß DŽDždž Σσς' COLLATE "en-x-icu");
+SELECT lower('AbCd 123 #$% ıiIİ ẞ ß DŽDždž Σσς' COLLATE "tr-x-icu");
+SELECT casefold('AbCd 123 #$% ıiIİ ẞ ß DŽDždž Σσς' COLLATE "tr-x-icu");
 
 
 -- LIKE/ILIKE
@@ -125,11 +130,11 @@ SELECT * FROM collate_test1 WHERE b ILIKE 'abc';
 SELECT * FROM collate_test1 WHERE b ILIKE 'abc%';
 SELECT * FROM collate_test1 WHERE b ILIKE '%bc%';
 
-SELECT 'Türkiye' COLLATE "en-x-icu" ILIKE '%KI%' AS "true";
-SELECT 'Türkiye' COLLATE "tr-x-icu" ILIKE '%KI%' AS "false";
+SELECT 'Türkiye' COLLATE "en-x-icu" ILIKE '%KI%' AS "true";
+SELECT 'Türkiye' COLLATE "tr-x-icu" ILIKE '%KI%' AS "false";
 
-SELECT 'bıt' ILIKE 'BIT' COLLATE "en-x-icu" AS "false";
-SELECT 'bıt' ILIKE 'BIT' COLLATE "tr-x-icu" AS "true";
+SELECT 'bıt' ILIKE 'BIT' COLLATE "en-x-icu" AS "false";
+SELECT 'bıt' ILIKE 'BIT' COLLATE "tr-x-icu" AS "true";
 
 -- The following actually exercises the selectivity estimation for ILIKE.
 SELECT relname FROM pg_class WHERE relname ILIKE 'abc%';
@@ -149,7 +154,7 @@ CREATE TABLE collate_test6 (
 );
 INSERT INTO collate_test6 VALUES (1, 'abc'), (2, 'ABC'), (3, '123'), (4, 'ab1'),
     (5, 'a1!'), (6, 'a c'), (7, '!.;'), (8, ' '),
-    (9, 'äbç'), (10, 'ÄBÇ');
+    (9, 'äbç'), (10, 'ÄBÇ');
 SELECT b,
        b ~ '^[[:alpha:]]+$' AS is_alpha,
        b ~ '^[[:upper:]]+$' AS is_upper,
@@ -162,11 +167,11 @@ SELECT b,
        b ~ '^[[:space:]]+$' AS is_space
 FROM collate_test6;
 
-SELECT 'Türkiye' COLLATE "en-x-icu" ~* 'KI' AS "true";
-SELECT 'Türkiye' COLLATE "tr-x-icu" ~* 'KI' AS "true"; -- true with ICU
+SELECT 'Türkiye' COLLATE "en-x-icu" ~* 'KI' AS "true";
+SELECT 'Türkiye' COLLATE "tr-x-icu" ~* 'KI' AS "true"; -- true with ICU
 
-SELECT 'bıt' ~* 'BIT' COLLATE "en-x-icu" AS "false";
-SELECT 'bıt' ~* 'BIT' COLLATE "tr-x-icu" AS "false"; -- false with ICU
+SELECT 'bıt' ~* 'BIT' COLLATE "en-x-icu" AS "false";
+SELECT 'bıt' ~* 'BIT' COLLATE "tr-x-icu" AS "false"; -- false with ICU
 
 -- The following actually exercises the selectivity estimation for ~*.
 SELECT relname FROM pg_class WHERE relname ~* '^abc';
@@ -420,7 +425,7 @@ DROP ROLE regress_test_role;
 ALTER COLLATION "en-x-icu" REFRESH VERSION;
 
 -- also test for database while we are here
-SELECT current_database() AS datname ;
+SELECT current_database() AS datname /* \gset */;
 ALTER DATABASE "datname" REFRESH COLLATION VERSION;
 
@@ -438,6 +443,8 @@ CREATE INDEX collate_dep_test4i ON collate_dep_test4t (b COLLATE test0);
 
 DROP COLLATION test0 RESTRICT; -- fail
 DROP COLLATION test0 CASCADE;
+-- \d collate_dep_test1
+-- \d collate_dep_test2
 DROP TABLE collate_dep_test1, collate_dep_test4t;
 DROP TYPE collate_dep_test2;
@@ -467,12 +474,12 @@ SELECT * FROM collate_test2 ORDER BY b COLLATE UNICODE;
 SET client_min_messages=WARNING;
 CREATE COLLATION testcoll_ignore_accents (provider = icu, locale = '@colStrength=primary;colCaseLevel=yes');
 RESET client_min_messages;
-SELECT 'aaá' > 'AAA' COLLATE "und-x-icu", 'aaá' < 'AAA' COLLATE testcoll_ignore_accents;
+SELECT 'aaá' > 'AAA' COLLATE "und-x-icu", 'aaá' < 'AAA' COLLATE testcoll_ignore_accents;
 
 SET client_min_messages=WARNING;
 CREATE COLLATION testcoll_backwards (provider = icu, locale = '@colBackwards=yes');
 RESET client_min_messages;
-SELECT 'coté' < 'côte' COLLATE "und-x-icu", 'coté' > 'côte' COLLATE testcoll_backwards;
+SELECT 'coté' < 'côte' COLLATE "und-x-icu", 'coté' > 'côte' COLLATE testcoll_backwards;
 
 CREATE COLLATION testcoll_lower_first (provider = icu, locale = '@colCaseFirst=lower');
 CREATE COLLATION testcoll_upper_first (provider = icu, locale = '@colCaseFirst=upper');
@@ -491,7 +498,7 @@ CREATE COLLATION testcoll_error1 (provider = icu, locale = '@colNumeric=lower');
 -- test that attributes not handled by icu_set_collation_attributes()
 -- (handled by ucol_open() directly) also work
 CREATE COLLATION testcoll_de_phonebook (provider = icu, locale = 'de@collation=phonebook');
-SELECT 'Goldmann' < 'Götz' COLLATE "de-x-icu", 'Goldmann' > 'Götz' COLLATE testcoll_de_phonebook;
+SELECT 'Goldmann' < 'Götz' COLLATE "de-x-icu", 'Goldmann' > 'Götz' COLLATE testcoll_de_phonebook;
 
 
 -- rules
@@ -523,8 +530,8 @@ CREATE TABLE test6 (a int, b text);
 INSERT INTO test6 VALUES (1, U&'zy\00E4bc');
 INSERT INTO test6 VALUES (2, U&'zy\0061\0308bc');
 SELECT * FROM test6;
-SELECT * FROM test6 WHERE b = 'zyäbc' COLLATE ctest_det;
-SELECT * FROM test6 WHERE b = 'zyäbc' COLLATE ctest_nondet;
+SELECT * FROM test6 WHERE b = 'zyäbc' COLLATE ctest_det;
+SELECT * FROM test6 WHERE b = 'zyäbc' COLLATE ctest_nondet;
 
 SELECT strpos(b COLLATE ctest_det, 'bc') FROM test6;
 SELECT strpos(b COLLATE ctest_nondet, 'bc') FROM test6;
@@ -540,16 +547,16 @@ SELECT a, split_part(b COLLATE ctest_nondet, U&'\00E4b', -1) FROM test6;
 SELECT a, string_to_array(b COLLATE ctest_det, U&'\00E4b') FROM test6;
 SELECT a, string_to_array(b COLLATE ctest_nondet, U&'\00E4b') FROM test6;
 
-SELECT * FROM test6 WHERE b LIKE 'zyäbc' COLLATE ctest_det;
-SELECT * FROM test6 WHERE b LIKE 'zyäbc' COLLATE ctest_nondet;
+SELECT * FROM test6 WHERE b LIKE 'zyäbc' COLLATE ctest_det;
+SELECT * FROM test6 WHERE b LIKE 'zyäbc' COLLATE ctest_nondet;
 
 -- same with arrays
 CREATE TABLE test6a (a int, b text[]);
 INSERT INTO test6a VALUES (1, ARRAY[U&'\00E4bc']);
 INSERT INTO test6a VALUES (2, ARRAY[U&'\0061\0308bc']);
 SELECT * FROM test6a;
-SELECT * FROM test6a WHERE b = ARRAY['äbc'] COLLATE ctest_det;
-SELECT * FROM test6a WHERE b = ARRAY['äbc'] COLLATE ctest_nondet;
+SELECT * FROM test6a WHERE b = ARRAY['äbc'] COLLATE ctest_det;
+SELECT * FROM test6a WHERE b = ARRAY['äbc'] COLLATE ctest_nondet;
 
 CREATE COLLATION case_sensitive (provider = icu, locale = '');
 CREATE COLLATION case_insensitive (provider = icu, locale = '@colStrength=secondary', deterministic = false);
@@ -561,6 +568,9 @@ SELECT 'abc' <= 'ABC' COLLATE case_insensitive, 'abc' >= 'ABC' COLLATE case_inse
 SELECT array_sort('{a,B}'::text[] COLLATE case_insensitive);
 SELECT array_sort('{a,B}'::text[] COLLATE "C");
 
+-- test replace() at the end of the string (bug #19341)
+SELECT replace('testX' COLLATE case_insensitive, 'x' COLLATE case_insensitive, 'er');
+
 -- test language tags
 CREATE COLLATION lt_insensitive (provider = icu, locale = 'en-u-ks-level1', deterministic = false);
 SELECT 'aBcD' COLLATE lt_insensitive = 'AbCd' COLLATE lt_insensitive;
@@ -674,10 +684,10 @@ RESET enable_seqscan;
 
 -- Unicode special case: different variants of Greek lower case sigma.
 -- A naive implementation like citext that just does lower(x) =
--- lower(y) will do the wrong thing here, because lower('Σ') is 'σ'
--- but upper('ς') is 'Σ'.
-SELECT 'ὀδυσσεύς' = 'ὈΔΥΣΣΕΎΣ' COLLATE case_sensitive;
-SELECT 'ὀδυσσεύς' = 'ὈΔΥΣΣΕΎΣ' COLLATE case_insensitive;
+-- lower(y) will do the wrong thing here, because lower('Σ') is 'σ'
+-- but upper('ς') is 'Σ'.
+SELECT 'ὀδυσσεύς' = 'ὈΔΥΣΣΕΎΣ' COLLATE case_sensitive;
+SELECT 'ὀδυσσεύς' = 'ὈΔΥΣΣΕΎΣ' COLLATE case_insensitive;
 
 -- name vs. text comparison operators
 SELECT relname FROM pg_class WHERE relname = 'PG_CLASS'::text COLLATE case_insensitive;
@@ -706,14 +716,14 @@ CREATE COLLATION ignore_accents (provider = icu, locale = '@colStrength=primary;
 RESET client_min_messages;
 
 CREATE TABLE test4 (a int, b text);
-INSERT INTO test4 VALUES (1, 'cote'), (2, 'côte'), (3, 'coté'), (4, 'côté');
+INSERT INTO test4 VALUES (1, 'cote'), (2, 'côte'), (3, 'coté'), (4, 'côté');
 SELECT * FROM test4 WHERE b = 'cote';
 SELECT * FROM test4 WHERE b = 'cote' COLLATE ignore_accents;
 SELECT * FROM test4 WHERE b = 'Cote' COLLATE ignore_accents; -- still case-sensitive
 SELECT * FROM test4 WHERE b = 'Cote' COLLATE case_insensitive;
 
 CREATE TABLE test4nfd (a int, b text);
-INSERT INTO test4nfd VALUES (1, 'cote'), (2, 'côte'), (3, 'coté'), (4, 'côté');
+INSERT INTO test4nfd VALUES (1, 'cote'), (2, 'côte'), (3, 'coté'), (4, 'côté');
 UPDATE test4nfd SET b = normalize(b, nfd);
 
 -- This shows why replace should be greedy.  Otherwise, in the NFD
@@ -990,6 +1000,19 @@ INSERT INTO t5 (a, b) values (1, 'D1'), (2, 'D2'), (3, 'd1');
 -- rewriting.)
 SELECT * FROM t5 ORDER BY c ASC, a ASC;
 
+-- Check that DEFAULT expressions in SQL/JSON functions use the same collation
+-- as the RETURNING type. Mismatched collations should raise an error.
+CREATE DOMAIN d1 AS text COLLATE case_insensitive; +CREATE DOMAIN d2 AS text COLLATE "C"; +SELECT JSON_VALUE('{"a": "A"}', '$.a' RETURNING d1 DEFAULT ('C' COLLATE "C") COLLATE case_insensitive ON EMPTY) = 'a'; -- true +SELECT JSON_VALUE('{"a": "A"}', '$.a' RETURNING d1 DEFAULT 'C' ON EMPTY) = 'a'; -- true +SELECT JSON_VALUE('{"a": "A"}', '$.a' RETURNING d1 DEFAULT 'C'::d2 ON EMPTY) = 'a'; -- error +SELECT JSON_VALUE('{"a": "A"}', '$.a' RETURNING d1 DEFAULT 'C' COLLATE "C" ON EMPTY) = 'a'; -- error +SELECT JSON_VALUE('{"a": "A"}', '$.c' RETURNING d1 DEFAULT 'A' ON EMPTY) = 'a'; -- true +SELECT JSON_VALUE('{"a": "A"}', '$.c' RETURNING d1 DEFAULT 'A' COLLATE case_insensitive ON EMPTY) = 'a'; -- true +SELECT JSON_VALUE('{"a": "A"}', '$.c' RETURNING d1 DEFAULT 'A'::d2 ON EMPTY) = 'a'; -- error +SELECT JSON_VALUE('{"a": "A"}', '$.c' RETURNING d1 DEFAULT 'A' COLLATE "C" ON EMPTY) = 'a'; -- error +DROP DOMAIN d1, d2; -- cleanup RESET search_path; diff --git a/crates/squawk_parser/tests/data/regression_suite/collate.linux.utf8.sql b/crates/squawk_parser/tests/data/regression_suite/collate.linux.utf8.sql index a6c2f94f..04bc23a4 100644 --- a/crates/squawk_parser/tests/data/regression_suite/collate.linux.utf8.sql +++ b/crates/squawk_parser/tests/data/regression_suite/collate.linux.utf8.sql @@ -7,7 +7,10 @@ SELECT getdatabaseencoding() <> 'UTF8' OR (SELECT count(*) FROM pg_collation WHERE collname IN ('de_DE', 'en_US', 'sv_SE', 'tr_TR') AND collencoding = pg_char_to_encoding('UTF8')) <> 4 OR version() !~ 'linux-gnu' - AS skip_test ; + AS skip_test /* \gset */; +-- \if :skip_test +-- \quit +-- \endif SET client_encoding TO UTF8; @@ -20,6 +23,7 @@ CREATE TABLE collate_test1 ( b text COLLATE "en_US" NOT NULL ); +-- \d collate_test1 CREATE TABLE collate_test_fail ( a int, @@ -40,6 +44,7 @@ CREATE TABLE collate_test_like ( LIKE collate_test1 ); +-- \d collate_test_like CREATE TABLE collate_test2 ( a int, @@ -51,7 +56,7 @@ CREATE TABLE collate_test3 ( b text COLLATE "C" ); -INSERT INTO collate_test1 VALUES (1, 'abc'), (2, 'äbc'), (3, 'bbc'), (4, 'ABC'); +INSERT INTO collate_test1 VALUES (1, 'abc'), (2, 'äbc'), (3, 'bbc'), (4, 'ABC'); INSERT INTO collate_test2 SELECT * FROM collate_test1; INSERT INTO collate_test3 SELECT * FROM collate_test1; @@ -95,8 +100,8 @@ SELECT * FROM collate_test2 ORDER BY b; SELECT * FROM collate_test3 ORDER BY b; -- constant expression folding -SELECT 'bbc' COLLATE "en_US" > 'äbc' COLLATE "en_US" AS "true"; -SELECT 'bbc' COLLATE "sv_SE" > 'äbc' COLLATE "sv_SE" AS "false"; +SELECT 'bbc' COLLATE "en_US" > 'äbc' COLLATE "en_US" AS "true"; +SELECT 'bbc' COLLATE "sv_SE" > 'äbc' COLLATE "sv_SE" AS "false"; -- upper/lower @@ -122,11 +127,11 @@ SELECT * FROM collate_test1 WHERE b ILIKE 'abc'; SELECT * FROM collate_test1 WHERE b ILIKE 'abc%'; SELECT * FROM collate_test1 WHERE b ILIKE '%bc%'; -SELECT 'Türkiye' COLLATE "en_US" ILIKE '%KI%' AS "true"; -SELECT 'Türkiye' COLLATE "tr_TR" ILIKE '%KI%' AS "false"; +SELECT 'Türkiye' COLLATE "en_US" ILIKE '%KI%' AS "true"; +SELECT 'Türkiye' COLLATE "tr_TR" ILIKE '%KI%' AS "false"; -SELECT 'bıt' ILIKE 'BIT' COLLATE "en_US" AS "false"; -SELECT 'bıt' ILIKE 'BIT' COLLATE "tr_TR" AS "true"; +SELECT 'bıt' ILIKE 'BIT' COLLATE "en_US" AS "false"; +SELECT 'bıt' ILIKE 'BIT' COLLATE "tr_TR" AS "true"; -- The following actually exercises the selectivity estimation for ILIKE. 
SELECT relname FROM pg_class WHERE relname ILIKE 'abc%'; @@ -146,7 +151,7 @@ CREATE TABLE collate_test6 ( ); INSERT INTO collate_test6 VALUES (1, 'abc'), (2, 'ABC'), (3, '123'), (4, 'ab1'), (5, 'a1!'), (6, 'a c'), (7, '!.;'), (8, ' '), - (9, 'äbç'), (10, 'ÄBÇ'); + (9, 'äbç'), (10, 'ÄBÇ'); SELECT b, b ~ '^[[:alpha:]]+$' AS is_alpha, b ~ '^[[:upper:]]+$' AS is_upper, @@ -159,11 +164,11 @@ SELECT b, b ~ '^[[:space:]]+$' AS is_space FROM collate_test6; -SELECT 'Türkiye' COLLATE "en_US" ~* 'KI' AS "true"; -SELECT 'Türkiye' COLLATE "tr_TR" ~* 'KI' AS "false"; +SELECT 'Türkiye' COLLATE "en_US" ~* 'KI' AS "true"; +SELECT 'Türkiye' COLLATE "tr_TR" ~* 'KI' AS "false"; -SELECT 'bıt' ~* 'BIT' COLLATE "en_US" AS "false"; -SELECT 'bıt' ~* 'BIT' COLLATE "tr_TR" AS "true"; +SELECT 'bıt' ~* 'BIT' COLLATE "en_US" AS "false"; +SELECT 'bıt' ~* 'BIT' COLLATE "tr_TR" AS "true"; -- The following actually exercises the selectivity estimation for ~*. SELECT relname FROM pg_class WHERE relname ~* '^abc'; @@ -179,8 +184,8 @@ SELECT to_char(date '2010-04-01', 'DD TMMON YYYY' COLLATE "tr_TR"); -- to_date -SELECT to_date('01 ŞUB 2010', 'DD TMMON YYYY'); -SELECT to_date('01 Şub 2010', 'DD TMMON YYYY'); +SELECT to_date('01 ŞUB 2010', 'DD TMMON YYYY'); +SELECT to_date('01 Şub 2010', 'DD TMMON YYYY'); SELECT to_date('1234567890ab 2010', 'TMMONTH YYYY'); -- fail @@ -406,7 +411,7 @@ DROP ROLE regress_test_role; ALTER COLLATION "en_US" REFRESH VERSION; -- also test for database while we are here -SELECT current_database() AS datname ; +SELECT current_database() AS datname /* \gset */; ALTER DATABASE "datname" REFRESH COLLATION VERSION; @@ -424,6 +429,8 @@ CREATE INDEX collate_dep_test4i ON collate_dep_test4t (b COLLATE test0); DROP COLLATION test0 RESTRICT; -- fail DROP COLLATION test0 CASCADE; +-- \d collate_dep_test1 +-- \d collate_dep_test2 DROP TABLE collate_dep_test1, collate_dep_test4t; DROP TYPE collate_dep_test2; diff --git a/crates/squawk_parser/tests/data/regression_suite/collate.sql b/crates/squawk_parser/tests/data/regression_suite/collate.sql index 924a27d8..5bc1f846 100644 --- a/crates/squawk_parser/tests/data/regression_suite/collate.sql +++ b/crates/squawk_parser/tests/data/regression_suite/collate.sql @@ -18,6 +18,7 @@ CREATE TABLE collate_test1 ( b text COLLATE "C" NOT NULL ); +-- \d collate_test1 CREATE TABLE collate_test_fail ( a int COLLATE "C", @@ -28,6 +29,7 @@ CREATE TABLE collate_test_like ( LIKE collate_test1 ); +-- \d collate_test_like CREATE TABLE collate_test2 ( a int, @@ -276,6 +278,7 @@ SELECT collation for ((SELECT b FROM collate_test1 LIMIT 1)); CREATE VIEW collate_on_int AS SELECT c1+1 AS c1p FROM (SELECT ('4' COLLATE "C")::INT AS c1) ss; +-- \d+ collate_on_int -- Check conflicting or redundant options in CREATE COLLATION -- LC_COLLATE diff --git a/crates/squawk_parser/tests/data/regression_suite/collate.utf8.sql b/crates/squawk_parser/tests/data/regression_suite/collate.utf8.sql index 392eb23a..0cea2dab 100644 --- a/crates/squawk_parser/tests/data/regression_suite/collate.utf8.sql +++ b/crates/squawk_parser/tests/data/regression_suite/collate.utf8.sql @@ -4,7 +4,10 @@ */ /* skip test if not UTF8 server encoding */ -SELECT getdatabaseencoding() <> 'UTF8' AS skip_test ; +SELECT getdatabaseencoding() <> 'UTF8' AS skip_test /* \gset */; +-- \if :skip_test +-- \quit +-- \endif SET client_encoding TO UTF8; @@ -40,12 +43,12 @@ CREATE TABLE test_pg_c_utf8 ( ); INSERT INTO test_pg_c_utf8 VALUES ('abc DEF 123abc'), - ('ábc sßs ßss DÉF'), - ('DŽxxDŽ džxxDž Džxxdž'), - (U&'Λλ 1a \FF11a'), - ('ȺȺȺ'), - 
('ⱥⱥⱥ'), - ('ⱥȺ'); + ('ábc sßs ßss DÉF'), + ('DŽxxDŽ džxxDž Džxxdž'), + (U&'Λλ 1a \FF11a'), + ('ȺȺȺ'), + ('ⱥⱥⱥ'), + ('ⱥȺ'); SELECT t, lower(t), initcap(t), upper(t), @@ -58,9 +61,9 @@ SELECT DROP TABLE test_pg_c_utf8; -- negative test: Final_Sigma not used for builtin locale C.UTF-8 -SELECT lower('ΑΣ' COLLATE PG_C_UTF8); -SELECT lower('ΑͺΣͺ' COLLATE PG_C_UTF8); -SELECT lower('Α΄Σ΄' COLLATE PG_C_UTF8); +SELECT lower('ΑΣ' COLLATE PG_C_UTF8); +SELECT lower('ΑͺΣͺ' COLLATE PG_C_UTF8); +SELECT lower('Α΄Σ΄' COLLATE PG_C_UTF8); -- properties @@ -69,18 +72,18 @@ SELECT 'xyz' !~ '[[:upper:]]' COLLATE PG_C_UTF8; SELECT '@' !~ '[[:alnum:]]' COLLATE PG_C_UTF8; SELECT '=' ~ '[[:punct:]]' COLLATE PG_C_UTF8; -- symbols are punctuation in posix SELECT 'a8a' ~ '[[:digit:]]' COLLATE PG_C_UTF8; -SELECT '൧' !~ '\d' COLLATE PG_C_UTF8; -- only 0-9 considered digits in posix +SELECT '൧' !~ '\d' COLLATE PG_C_UTF8; -- only 0-9 considered digits in posix -- case mapping SELECT 'xYz' ~* 'XyZ' COLLATE PG_C_UTF8; SELECT 'xAb' ~* '[W-Y]' COLLATE PG_C_UTF8; SELECT 'xAb' !~* '[c-d]' COLLATE PG_C_UTF8; -SELECT 'Δ' ~* '[γ-λ]' COLLATE PG_C_UTF8; -SELECT 'δ' ~* '[Γ-Λ]' COLLATE PG_C_UTF8; -- same as above with cases reversed +SELECT 'Δ' ~* '[γ-λ]' COLLATE PG_C_UTF8; +SELECT 'δ' ~* '[Γ-Λ]' COLLATE PG_C_UTF8; -- same as above with cases reversed -- case folding -select casefold('AbCd 123 #$% ıiIİ ẞ ß DŽDždž Σσς' collate PG_C_UTF8); +select casefold('AbCd 123 #$% ıiIİ ẞ ß DŽDždž Σσς' collate PG_C_UTF8); -- -- Test PG_UNICODE_FAST @@ -96,12 +99,12 @@ CREATE TABLE test_pg_unicode_fast ( ); INSERT INTO test_pg_unicode_fast VALUES ('abc DEF 123abc'), - ('ábc sßs ßss DÉF'), - ('DŽxxDŽ džxxDž Džxxdž'), - (U&'Λλ 1a \FF11a'), - ('ȺȺȺ'), - ('ⱥⱥⱥ'), - ('ⱥȺ'); + ('ábc sßs ßss DÉF'), + ('DŽxxDŽ džxxDž Džxxdž'), + (U&'Λλ 1a \FF11a'), + ('ȺȺȺ'), + ('ⱥⱥⱥ'), + ('ⱥȺ'); SELECT t, lower(t), initcap(t), upper(t), @@ -114,17 +117,17 @@ SELECT DROP TABLE test_pg_unicode_fast; -- test Final_Sigma -SELECT lower('ΑΣ' COLLATE PG_UNICODE_FAST); -- 0391 03A3 -SELECT lower('ΑΣ0' COLLATE PG_UNICODE_FAST); -- 0391 03A3 0030 -SELECT lower('ἈΣ̓' COLLATE PG_UNICODE_FAST); -- 0391 0343 03A3 0343 -SELECT lower('ᾼΣͅ' COLLATE PG_UNICODE_FAST); -- 0391 0345 03A3 0345 +SELECT lower('ΑΣ' COLLATE PG_UNICODE_FAST); -- 0391 03A3 +SELECT lower('ΑΣ0' COLLATE PG_UNICODE_FAST); -- 0391 03A3 0030 +SELECT lower('ἈΣ̓' COLLATE PG_UNICODE_FAST); -- 0391 0343 03A3 0343 +SELECT lower('ᾼΣͅ' COLLATE PG_UNICODE_FAST); -- 0391 0345 03A3 0345 -- test !Final_Sigma -SELECT lower('Σ' COLLATE PG_UNICODE_FAST); -- 03A3 -SELECT lower('0Σ' COLLATE PG_UNICODE_FAST); -- 0030 03A3 -SELECT lower('ΑΣΑ' COLLATE PG_UNICODE_FAST); -- 0391 03A3 0391 -SELECT lower('ἈΣ̓Α' COLLATE PG_UNICODE_FAST); -- 0391 0343 03A3 0343 0391 -SELECT lower('ᾼΣͅΑ' COLLATE PG_UNICODE_FAST); -- 0391 0345 03A3 0345 0391 +SELECT lower('Σ' COLLATE PG_UNICODE_FAST); -- 03A3 +SELECT lower('0Σ' COLLATE PG_UNICODE_FAST); -- 0030 03A3 +SELECT lower('ΑΣΑ' COLLATE PG_UNICODE_FAST); -- 0391 03A3 0391 +SELECT lower('ἈΣ̓Α' COLLATE PG_UNICODE_FAST); -- 0391 0343 03A3 0343 0391 +SELECT lower('ᾼΣͅΑ' COLLATE PG_UNICODE_FAST); -- 0391 0345 03A3 0345 0391 -- properties @@ -133,15 +136,15 @@ SELECT 'xyz' !~ '[[:upper:]]' COLLATE PG_UNICODE_FAST; SELECT '@' !~ '[[:alnum:]]' COLLATE PG_UNICODE_FAST; SELECT '=' !~ '[[:punct:]]' COLLATE PG_UNICODE_FAST; -- symbols are not punctuation SELECT 'a8a' ~ '[[:digit:]]' COLLATE PG_UNICODE_FAST; -SELECT '൧' ~ '\d' COLLATE PG_UNICODE_FAST; +SELECT '൧' ~ '\d' COLLATE PG_UNICODE_FAST; -- case mapping SELECT 
'xYz' ~* 'XyZ' COLLATE PG_UNICODE_FAST; SELECT 'xAb' ~* '[W-Y]' COLLATE PG_UNICODE_FAST; SELECT 'xAb' !~* '[c-d]' COLLATE PG_UNICODE_FAST; -SELECT 'Δ' ~* '[γ-λ]' COLLATE PG_UNICODE_FAST; -SELECT 'δ' ~* '[Γ-Λ]' COLLATE PG_UNICODE_FAST; -- same as above with cases reversed +SELECT 'Δ' ~* '[γ-λ]' COLLATE PG_UNICODE_FAST; +SELECT 'δ' ~* '[Γ-Λ]' COLLATE PG_UNICODE_FAST; -- same as above with cases reversed -- case folding -select casefold('AbCd 123 #$% ıiIİ ẞ ß DŽDždž Σσς' collate PG_UNICODE_FAST); +select casefold('AbCd 123 #$% ıiIİ ẞ ß DŽDždž Σσς' collate PG_UNICODE_FAST); diff --git a/crates/squawk_parser/tests/data/regression_suite/compression.sql b/crates/squawk_parser/tests/data/regression_suite/compression.sql index 2297aac9..9ec25580 100644 --- a/crates/squawk_parser/tests/data/regression_suite/compression.sql +++ b/crates/squawk_parser/tests/data/regression_suite/compression.sql @@ -1,3 +1,9 @@ +-- Default set of tests for TOAST compression, independent on compression +-- methods supported by the build. + +CREATE SCHEMA pglz; +SET search_path TO pglz, public; +-- \set HIDE_TOAST_COMPRESSION false -- ensure we get stable results regardless of installation's default SET default_toast_compression = 'pglz'; @@ -6,135 +12,78 @@ SET default_toast_compression = 'pglz'; CREATE TABLE cmdata(f1 text COMPRESSION pglz); CREATE INDEX idx ON cmdata(f1); INSERT INTO cmdata VALUES(repeat('1234567890', 1000)); -CREATE TABLE cmdata1(f1 TEXT COMPRESSION lz4); -INSERT INTO cmdata1 VALUES(repeat('1234567890', 1004)); +-- \d+ cmdata -- verify stored compression method in the data SELECT pg_column_compression(f1) FROM cmdata; -SELECT pg_column_compression(f1) FROM cmdata1; -- decompress data slice SELECT SUBSTR(f1, 200, 5) FROM cmdata; -SELECT SUBSTR(f1, 2000, 50) FROM cmdata1; -- copy with table creation SELECT * INTO cmmove1 FROM cmdata; +-- \d+ cmmove1 SELECT pg_column_compression(f1) FROM cmmove1; --- copy to existing table -CREATE TABLE cmmove3(f1 text COMPRESSION pglz); -INSERT INTO cmmove3 SELECT * FROM cmdata; -INSERT INTO cmmove3 SELECT * FROM cmdata1; -SELECT pg_column_compression(f1) FROM cmmove3; - --- test LIKE INCLUDING COMPRESSION -CREATE TABLE cmdata2 (LIKE cmdata1 INCLUDING COMPRESSION); -DROP TABLE cmdata2; - -- try setting compression for incompressible data type CREATE TABLE cmdata2 (f1 int COMPRESSION pglz); --- update using datum from different table -CREATE TABLE cmmove2(f1 text COMPRESSION pglz); -INSERT INTO cmmove2 VALUES (repeat('1234567890', 1004)); -SELECT pg_column_compression(f1) FROM cmmove2; -UPDATE cmmove2 SET f1 = cmdata1.f1 FROM cmdata1; -SELECT pg_column_compression(f1) FROM cmmove2; - -- test externally stored compressed data CREATE OR REPLACE FUNCTION large_val() RETURNS TEXT LANGUAGE SQL AS 'select array_agg(fipshash(g::text))::text from generate_series(1, 256) g'; CREATE TABLE cmdata2 (f1 text COMPRESSION pglz); INSERT INTO cmdata2 SELECT large_val() || repeat('a', 4000); SELECT pg_column_compression(f1) FROM cmdata2; -INSERT INTO cmdata1 SELECT large_val() || repeat('a', 4000); -SELECT pg_column_compression(f1) FROM cmdata1; -SELECT SUBSTR(f1, 200, 5) FROM cmdata1; SELECT SUBSTR(f1, 200, 5) FROM cmdata2; DROP TABLE cmdata2; --test column type update varlena/non-varlena CREATE TABLE cmdata2 (f1 int); +-- \d+ cmdata2 ALTER TABLE cmdata2 ALTER COLUMN f1 TYPE varchar; +-- \d+ cmdata2 ALTER TABLE cmdata2 ALTER COLUMN f1 TYPE int USING f1::integer; +-- \d+ cmdata2 --changing column storage should not impact the compression method --but the data should not be compressed 
ALTER TABLE cmdata2 ALTER COLUMN f1 TYPE varchar; ALTER TABLE cmdata2 ALTER COLUMN f1 SET COMPRESSION pglz; +-- \d+ cmdata2 ALTER TABLE cmdata2 ALTER COLUMN f1 SET STORAGE plain; +-- \d+ cmdata2 INSERT INTO cmdata2 VALUES (repeat('123456789', 800)); SELECT pg_column_compression(f1) FROM cmdata2; --- test compression with materialized view -CREATE MATERIALIZED VIEW compressmv(x) AS SELECT * FROM cmdata1; -SELECT pg_column_compression(f1) FROM cmdata1; -SELECT pg_column_compression(x) FROM compressmv; - --- test compression with partition -CREATE TABLE cmpart(f1 text COMPRESSION lz4) PARTITION BY HASH(f1); -CREATE TABLE cmpart1 PARTITION OF cmpart FOR VALUES WITH (MODULUS 2, REMAINDER 0); -CREATE TABLE cmpart2(f1 text COMPRESSION pglz); - -ALTER TABLE cmpart ATTACH PARTITION cmpart2 FOR VALUES WITH (MODULUS 2, REMAINDER 1); -INSERT INTO cmpart VALUES (repeat('123456789', 1004)); -INSERT INTO cmpart VALUES (repeat('123456789', 4004)); -SELECT pg_column_compression(f1) FROM cmpart1; -SELECT pg_column_compression(f1) FROM cmpart2; - -- test compression with inheritance -CREATE TABLE cminh() INHERITS(cmdata, cmdata1); -- error -CREATE TABLE cminh(f1 TEXT COMPRESSION lz4) INHERITS(cmdata); -- error CREATE TABLE cmdata3(f1 text); CREATE TABLE cminh() INHERITS (cmdata, cmdata3); -- test default_toast_compression GUC +-- suppress machine-dependent details +-- \set VERBOSITY terse SET default_toast_compression = ''; SET default_toast_compression = 'I do not exist compression'; -SET default_toast_compression = 'lz4'; SET default_toast_compression = 'pglz'; - --- test alter compression method -ALTER TABLE cmdata ALTER COLUMN f1 SET COMPRESSION lz4; -INSERT INTO cmdata VALUES (repeat('123456789', 4004)); -SELECT pg_column_compression(f1) FROM cmdata; +-- \set VERBOSITY default ALTER TABLE cmdata2 ALTER COLUMN f1 SET COMPRESSION default; +-- \d+ cmdata2 --- test alter compression method for materialized views -ALTER MATERIALIZED VIEW compressmv ALTER COLUMN x SET COMPRESSION lz4; - --- test alter compression method for partitioned tables -ALTER TABLE cmpart1 ALTER COLUMN f1 SET COMPRESSION pglz; -ALTER TABLE cmpart2 ALTER COLUMN f1 SET COMPRESSION lz4; - --- new data should be compressed with the current compression method -INSERT INTO cmpart VALUES (repeat('123456789', 1004)); -INSERT INTO cmpart VALUES (repeat('123456789', 4004)); -SELECT pg_column_compression(f1) FROM cmpart1; -SELECT pg_column_compression(f1) FROM cmpart2; +DROP TABLE cmdata2; -- VACUUM FULL does not recompress SELECT pg_column_compression(f1) FROM cmdata; VACUUM FULL cmdata; SELECT pg_column_compression(f1) FROM cmdata; --- test expression index -DROP TABLE cmdata2; -CREATE TABLE cmdata2 (f1 TEXT COMPRESSION pglz, f2 TEXT COMPRESSION lz4); -CREATE UNIQUE INDEX idx1 ON cmdata2 ((f1 || f2)); -INSERT INTO cmdata2 VALUES((SELECT array_agg(fipshash(g::TEXT))::TEXT FROM -generate_series(1, 50) g), VERSION()); - -- check data is ok SELECT length(f1) FROM cmdata; -SELECT length(f1) FROM cmdata1; SELECT length(f1) FROM cmmove1; -SELECT length(f1) FROM cmmove2; -SELECT length(f1) FROM cmmove3; CREATE TABLE badcompresstbl (a text COMPRESSION I_Do_Not_Exist_Compression); -- fails CREATE TABLE badcompresstbl (a text); ALTER TABLE badcompresstbl ALTER a SET COMPRESSION I_Do_Not_Exist_Compression; -- fails DROP TABLE badcompresstbl; +-- \set HIDE_TOAST_COMPRESSION true diff --git a/crates/squawk_parser/tests/data/regression_suite/compression_lz4.sql b/crates/squawk_parser/tests/data/regression_suite/compression_lz4.sql new file mode 100644 index 
00000000..a0bef14a
--- /dev/null
+++ b/crates/squawk_parser/tests/data/regression_suite/compression_lz4.sql
@@ -0,0 +1,129 @@
+-- Tests for TOAST compression with lz4
+
+SELECT NOT(enumvals @> '{lz4}') AS skip_test FROM pg_settings WHERE
+  name = 'default_toast_compression' /* \gset */;
+-- \if :skip_test
+-- \echo '*** skipping TOAST tests with lz4 (not supported) ***'
+-- \quit
+-- \endif
+
+CREATE SCHEMA lz4;
+SET search_path TO lz4, public;
+
+-- \set HIDE_TOAST_COMPRESSION false
+
+-- Ensure we get stable results regardless of the installation's default.
+-- We rely on this GUC value for a few tests.
+SET default_toast_compression = 'pglz';
+
+-- test creating table with compression method
+CREATE TABLE cmdata_pglz(f1 text COMPRESSION pglz);
+CREATE INDEX idx ON cmdata_pglz(f1);
+INSERT INTO cmdata_pglz VALUES(repeat('1234567890', 1000));
+-- \d+ cmdata
+CREATE TABLE cmdata_lz4(f1 TEXT COMPRESSION lz4);
+INSERT INTO cmdata_lz4 VALUES(repeat('1234567890', 1004));
+-- \d+ cmdata1
+
+-- verify stored compression method in the data
+SELECT pg_column_compression(f1) FROM cmdata_lz4;
+
+-- decompress data slice
+SELECT SUBSTR(f1, 200, 5) FROM cmdata_pglz;
+SELECT SUBSTR(f1, 2000, 50) FROM cmdata_lz4;
+
+-- copy with table creation
+SELECT * INTO cmmove1 FROM cmdata_lz4;
+-- \d+ cmmove1
+SELECT pg_column_compression(f1) FROM cmmove1;
+
+-- test LIKE INCLUDING COMPRESSION. The GUC default_toast_compression
+-- has no effect; the compression method comes from the table being copied.
+CREATE TABLE cmdata2 (LIKE cmdata_lz4 INCLUDING COMPRESSION);
+-- \d+ cmdata2
+DROP TABLE cmdata2;
+
+-- copy to existing table
+CREATE TABLE cmmove3(f1 text COMPRESSION pglz);
+INSERT INTO cmmove3 SELECT * FROM cmdata_pglz;
+INSERT INTO cmmove3 SELECT * FROM cmdata_lz4;
+SELECT pg_column_compression(f1) FROM cmmove3;
+
+-- update using datum from different table with LZ4 data.
+CREATE TABLE cmmove2(f1 text COMPRESSION pglz); +INSERT INTO cmmove2 VALUES (repeat('1234567890', 1004)); +SELECT pg_column_compression(f1) FROM cmmove2; +UPDATE cmmove2 SET f1 = cmdata_lz4.f1 FROM cmdata_lz4; +SELECT pg_column_compression(f1) FROM cmmove2; + +-- test externally stored compressed data +CREATE OR REPLACE FUNCTION large_val_lz4() RETURNS TEXT LANGUAGE SQL AS +'select array_agg(fipshash(g::text))::text from generate_series(1, 256) g'; +CREATE TABLE cmdata2 (f1 text COMPRESSION lz4); +INSERT INTO cmdata2 SELECT large_val_lz4() || repeat('a', 4000); +SELECT pg_column_compression(f1) FROM cmdata2; +SELECT SUBSTR(f1, 200, 5) FROM cmdata2; +DROP TABLE cmdata2; +DROP FUNCTION large_val_lz4; + +-- test compression with materialized view +CREATE MATERIALIZED VIEW compressmv(x) AS SELECT * FROM cmdata_lz4; +-- \d+ compressmv +SELECT pg_column_compression(f1) FROM cmdata_lz4; +SELECT pg_column_compression(x) FROM compressmv; + +-- test compression with partition +CREATE TABLE cmpart(f1 text COMPRESSION lz4) PARTITION BY HASH(f1); +CREATE TABLE cmpart1 PARTITION OF cmpart FOR VALUES WITH (MODULUS 2, REMAINDER 0); +CREATE TABLE cmpart2(f1 text COMPRESSION pglz); + +ALTER TABLE cmpart ATTACH PARTITION cmpart2 FOR VALUES WITH (MODULUS 2, REMAINDER 1); +INSERT INTO cmpart VALUES (repeat('123456789', 1004)); +INSERT INTO cmpart VALUES (repeat('123456789', 4004)); +SELECT pg_column_compression(f1) FROM cmpart1; +SELECT pg_column_compression(f1) FROM cmpart2; + +-- test compression with inheritance +CREATE TABLE cminh() INHERITS(cmdata_pglz, cmdata_lz4); -- error +CREATE TABLE cminh(f1 TEXT COMPRESSION lz4) INHERITS(cmdata_pglz); -- error +CREATE TABLE cmdata3(f1 text); +CREATE TABLE cminh() INHERITS (cmdata_pglz, cmdata3); + +-- test default_toast_compression GUC +SET default_toast_compression = 'lz4'; + +-- test alter compression method +ALTER TABLE cmdata_pglz ALTER COLUMN f1 SET COMPRESSION lz4; +INSERT INTO cmdata_pglz VALUES (repeat('123456789', 4004)); +-- \d+ cmdata +SELECT pg_column_compression(f1) FROM cmdata_pglz; +ALTER TABLE cmdata_pglz ALTER COLUMN f1 SET COMPRESSION pglz; + +-- test alter compression method for materialized views +ALTER MATERIALIZED VIEW compressmv ALTER COLUMN x SET COMPRESSION lz4; +-- \d+ compressmv + +-- test alter compression method for partitioned tables +ALTER TABLE cmpart1 ALTER COLUMN f1 SET COMPRESSION pglz; +ALTER TABLE cmpart2 ALTER COLUMN f1 SET COMPRESSION lz4; + +-- new data should be compressed with the current compression method +INSERT INTO cmpart VALUES (repeat('123456789', 1004)); +INSERT INTO cmpart VALUES (repeat('123456789', 4004)); +SELECT pg_column_compression(f1) FROM cmpart1; +SELECT pg_column_compression(f1) FROM cmpart2; + +-- test expression index +CREATE TABLE cmdata2 (f1 TEXT COMPRESSION pglz, f2 TEXT COMPRESSION lz4); +CREATE UNIQUE INDEX idx1 ON cmdata2 ((f1 || f2)); +INSERT INTO cmdata2 VALUES((SELECT array_agg(fipshash(g::TEXT))::TEXT FROM +generate_series(1, 50) g), VERSION()); + +-- check data is ok +SELECT length(f1) FROM cmdata_pglz; +SELECT length(f1) FROM cmdata_lz4; +SELECT length(f1) FROM cmmove1; +SELECT length(f1) FROM cmmove2; +SELECT length(f1) FROM cmmove3; + +-- \set HIDE_TOAST_COMPRESSION true diff --git a/crates/squawk_parser/tests/data/regression_suite/constraints.sql b/crates/squawk_parser/tests/data/regression_suite/constraints.sql index 490354b5..cf34b584 100644 --- a/crates/squawk_parser/tests/data/regression_suite/constraints.sql +++ b/crates/squawk_parser/tests/data/regression_suite/constraints.sql @@ 
-10,6 +10,7 @@ -- -- directory paths are passed to us in environment variables +-- \getenv abs_srcdir PG_ABS_SRCDIR -- -- DEFAULT syntax @@ -255,10 +256,12 @@ CREATE TABLE COPY_TBL (x INT, y TEXT, z INT, CONSTRAINT COPY_CON CHECK (x > 3 AND y <> 'check failed' AND x < 7 )); +-- \set filename :abs_srcdir '/data/constro.data' COPY COPY_TBL FROM 'filename'; SELECT * FROM COPY_TBL; +-- \set filename :abs_srcdir '/data/constrf.data' COPY COPY_TBL FROM 'filename'; SELECT * FROM COPY_TBL; @@ -534,6 +537,9 @@ CREATE TABLE UNIQUE_NOTEN_TBL(i int UNIQUE NOT ENFORCED); ALTER TABLE unique_tbl ALTER CONSTRAINT unique_tbl_i_key ENFORCED; ALTER TABLE unique_tbl ALTER CONSTRAINT unique_tbl_i_key NOT ENFORCED; +-- can't make an existing constraint NOT VALID +ALTER TABLE unique_tbl ALTER CONSTRAINT unique_tbl_i_key NOT VALID; + DROP TABLE unique_tbl; -- @@ -616,20 +622,25 @@ DROP TABLE deferred_excl; -- verify constraints created for NOT NULL clauses CREATE TABLE notnull_tbl1 (a INTEGER NOT NULL NOT NULL); +-- \d+ notnull_tbl1 -- no-op ALTER TABLE notnull_tbl1 ADD CONSTRAINT nn NOT NULL a; +-- \d+ notnull_tbl1 -- duplicate name ALTER TABLE notnull_tbl1 ADD COLUMN b INT CONSTRAINT notnull_tbl1_a_not_null NOT NULL; -- DROP NOT NULL gets rid of both the attnotnull flag and the constraint itself ALTER TABLE notnull_tbl1 ALTER a DROP NOT NULL; +-- \d+ notnull_tbl1 -- SET NOT NULL puts both back ALTER TABLE notnull_tbl1 ALTER a SET NOT NULL; +-- \d+ notnull_tbl1 -- Doing it twice doesn't create a redundant constraint ALTER TABLE notnull_tbl1 ALTER a SET NOT NULL; select conname, contype, conkey from pg_constraint where conrelid = 'notnull_tbl1'::regclass; -- Using the "table constraint" syntax also works ALTER TABLE notnull_tbl1 ALTER a DROP NOT NULL; ALTER TABLE notnull_tbl1 ADD CONSTRAINT foobar NOT NULL a; +-- \d+ notnull_tbl1 DROP TABLE notnull_tbl1; -- Verify that constraint names and NO INHERIT are properly considered when @@ -637,11 +648,17 @@ DROP TABLE notnull_tbl1; -- and that conflicting cases are rejected. Mind that table constraints -- handle this separately from column constraints. 
create table notnull_tbl1 (a int primary key constraint foo not null); +-- \d+ notnull_tbl1 create table notnull_tbl2 (a serial, constraint foo not null a); +-- \d+ notnull_tbl2 create table notnull_tbl3 (constraint foo not null a, a int generated by default as identity); +-- \d+ notnull_tbl3 create table notnull_tbl4 (a int not null constraint foo not null); +-- \d+ notnull_tbl4 create table notnull_tbl5 (a int constraint foo not null constraint foo not null); +-- \d+ notnull_tbl5 create table notnull_tbl6 (like notnull_tbl1, constraint foo not null a); +-- \d+ notnull_tbl6 drop table notnull_tbl2, notnull_tbl3, notnull_tbl4, notnull_tbl5, notnull_tbl6; -- error cases: @@ -665,15 +682,19 @@ drop table notnull_tbl1; -- NOT NULL NO INHERIT CREATE TABLE ATACC1 (a int, NOT NULL a NO INHERIT); CREATE TABLE ATACC2 () INHERITS (ATACC1); +-- \d+ ATACC2 DROP TABLE ATACC1, ATACC2; CREATE TABLE ATACC1 (a int); ALTER TABLE ATACC1 ADD NOT NULL a NO INHERIT; CREATE TABLE ATACC2 () INHERITS (ATACC1); +-- \d+ ATACC2 DROP TABLE ATACC1, ATACC2; CREATE TABLE ATACC1 (a int); CREATE TABLE ATACC2 () INHERITS (ATACC1); ALTER TABLE ATACC1 ADD NOT NULL a NO INHERIT; +-- \d+ ATACC2 CREATE TABLE ATACC3 (PRIMARY KEY (a)) INHERITS (ATACC1); +-- \d+ ATACC3 DROP TABLE ATACC1, ATACC2, ATACC3; -- NOT NULL NO INHERIT is not possible on partitioned tables @@ -690,6 +711,7 @@ ALTER TABLE ATACC1 ADD CONSTRAINT ditto NOT NULL a; -- dropping the NO INHERIT constraint allows this to work ALTER TABLE ATACC2 DROP CONSTRAINT a_is_not_null; ALTER TABLE ATACC1 ADD CONSTRAINT ditto NOT NULL a; +-- \d+ ATACC3 DROP TABLE ATACC1, ATACC2, ATACC3; -- Can't have two constraints with the same name @@ -703,19 +725,25 @@ DROP TABLE notnull_tbl2; CREATE TABLE notnull_tbl3 (a INTEGER NOT NULL, CHECK (a IS NOT NULL)); ALTER TABLE notnull_tbl3 ALTER A DROP NOT NULL; ALTER TABLE notnull_tbl3 ADD b int, ADD CONSTRAINT pk PRIMARY KEY (a, b); +-- \d notnull_tbl3 ALTER TABLE notnull_tbl3 DROP CONSTRAINT pk; +-- \d notnull_tbl3 -- Primary keys cause not-null constraints to be created. CREATE TABLE cnn_pk (a int, b int); CREATE TABLE cnn_pk_child () INHERITS (cnn_pk); ALTER TABLE cnn_pk ADD CONSTRAINT cnn_primarykey PRIMARY KEY (b); +-- \d+ cnn_pk* ALTER TABLE cnn_pk DROP CONSTRAINT cnn_primarykey; +-- \d+ cnn_pk* DROP TABLE cnn_pk, cnn_pk_child; -- As above, but create the primary key ahead of time CREATE TABLE cnn_pk (a int, b int, CONSTRAINT cnn_primarykey PRIMARY KEY (b)); CREATE TABLE cnn_pk_child () INHERITS (cnn_pk); +-- \d+ cnn_pk* ALTER TABLE cnn_pk DROP CONSTRAINT cnn_primarykey; +-- \d+ cnn_pk* DROP TABLE cnn_pk, cnn_pk_child; -- As above, but create the primary key using a UNIQUE index @@ -723,15 +751,18 @@ CREATE TABLE cnn_pk (a int, b int); CREATE UNIQUE INDEX cnn_uq ON cnn_pk (b); CREATE TABLE cnn_pk_child () INHERITS (cnn_pk); ALTER TABLE cnn_pk ADD CONSTRAINT cnn_primarykey PRIMARY KEY USING INDEX cnn_uq; +-- \d+ cnn_pk* DROP TABLE cnn_pk, cnn_pk_child; -- Unique constraints don't give raise to not-null constraints, however. 
create table cnn_uq (a int); alter table cnn_uq add unique (a); +-- \d+ cnn_uq drop table cnn_uq; create table cnn_uq (a int); create unique index cnn_uq_idx on cnn_uq (a); alter table cnn_uq add unique using index cnn_uq_idx; +-- \d+ cnn_uq -- can't create a primary key on a noinherit not-null create table cnn_pk (a int not null no inherit); @@ -755,6 +786,13 @@ ALTER TABLE notnull_tbl4_lk3 RENAME CONSTRAINT notnull_tbl4_a_not_null TO a_nn; CREATE TABLE notnull_tbl4_cld () INHERITS (notnull_tbl4); CREATE TABLE notnull_tbl4_cld2 (PRIMARY KEY (a) DEFERRABLE) INHERITS (notnull_tbl4); CREATE TABLE notnull_tbl4_cld3 (PRIMARY KEY (a) DEFERRABLE, CONSTRAINT a_nn NOT NULL a) INHERITS (notnull_tbl4); +-- \d+ notnull_tbl4 +-- \d+ notnull_tbl4_lk +-- \d+ notnull_tbl4_lk2 +-- \d+ notnull_tbl4_lk3 +-- \d+ notnull_tbl4_cld +-- \d+ notnull_tbl4_cld2 +-- \d+ notnull_tbl4_cld3 -- leave these tables around for pg_upgrade testing -- It's possible to remove a constraint from parents without affecting children @@ -763,11 +801,13 @@ CREATE TABLE notnull_tbl5 (a int CONSTRAINT ann NOT NULL, CREATE TABLE notnull_tbl5_child () INHERITS (notnull_tbl5); ALTER TABLE ONLY notnull_tbl5 DROP CONSTRAINT ann; ALTER TABLE ONLY notnull_tbl5 ALTER b DROP NOT NULL; +-- \d+ notnull_tbl5_child CREATE TABLE notnull_tbl6 (a int CONSTRAINT ann NOT NULL, b int CONSTRAINT bnn NOT NULL, check (a > 0)) PARTITION BY LIST (a); CREATE TABLE notnull_tbl6_1 PARTITION OF notnull_tbl6 FOR VALUES IN (1); ALTER TABLE ONLY notnull_tbl6 DROP CONSTRAINT ann; ALTER TABLE ONLY notnull_tbl6 ALTER b DROP NOT NULL; +-- \d+ notnull_tbl6_1 -- NOT NULL NOT VALID @@ -783,6 +823,7 @@ ALTER TABLE notnull_tbl1 ADD CONSTRAINT nn NOT NULL a; -- error ALTER TABLE notnull_tbl1 ADD CONSTRAINT nn NOT NULL a NOT VALID; -- ok -- even an invalid not-null forbids new nulls INSERT INTO notnull_tbl1 VALUES (NULL, 4); +-- \d+ notnull_tbl1 -- If we have an invalid constraint, we can't have another ALTER TABLE notnull_tbl1 ADD CONSTRAINT nn1 NOT NULL a NOT VALID NO INHERIT; @@ -791,9 +832,13 @@ ALTER TABLE notnull_tbl1 ADD CONSTRAINT nn NOT NULL a; -- cannot add primary key on a column with an invalid not-null ALTER TABLE notnull_tbl1 ADD PRIMARY KEY (a); +-- cannot set column as generated-as-identity if it has an invalid not-null +ALTER TABLE notnull_tbl1 ALTER COLUMN a ADD GENERATED ALWAYS AS IDENTITY; + -- ALTER column SET NOT NULL validates an invalid constraint (but this fails -- because of rows with null values) ALTER TABLE notnull_tbl1 ALTER a SET NOT NULL; +-- \d+ notnull_tbl1 -- Creating a derived table using LIKE gets the constraint, but it's valid CREATE TABLE notnull_tbl1_copy (LIKE notnull_tbl1); @@ -958,6 +1003,9 @@ create table constr_parent3 (a int not null); create table constr_child3 () inherits (constr_parent2, constr_parent3); EXECUTE get_nnconstraint_info('{constr_parent3, constr_child3}'); +COMMENT ON CONSTRAINT constr_parent2_a_not_null ON constr_parent2 IS 'this constraint is invalid'; +COMMENT ON CONSTRAINT constr_parent2_a_not_null ON constr_child2 IS 'this constraint is valid'; + DEALLOCATE get_nnconstraint_info; -- end NOT NULL NOT VALID @@ -998,3 +1046,14 @@ DROP DOMAIN constraint_comments_dom; DROP ROLE regress_constraint_comments; DROP ROLE regress_constraint_comments_noaccess; + +-- Leave some constraints for the pg_upgrade test to pick up +CREATE DOMAIN constraint_comments_dom AS int; + +ALTER DOMAIN constraint_comments_dom ADD CONSTRAINT inv_ck CHECK (value > 0) NOT VALID; +COMMENT ON CONSTRAINT inv_ck ON DOMAIN constraint_comments_dom IS 
'comment on invalid constraint'; + +-- Create a table that exercises pg_upgrade +CREATE TABLE regress_notnull1 (a integer); +CREATE TABLE regress_notnull2 () INHERITS (regress_notnull1); +ALTER TABLE ONLY regress_notnull2 ALTER COLUMN a SET NOT NULL; diff --git a/crates/squawk_parser/tests/data/regression_suite/conversion.sql b/crates/squawk_parser/tests/data/regression_suite/conversion.sql index 90fcf64c..70b6e498 100644 --- a/crates/squawk_parser/tests/data/regression_suite/conversion.sql +++ b/crates/squawk_parser/tests/data/regression_suite/conversion.sql @@ -3,7 +3,10 @@ -- -- directory paths and dlsuffix are passed to us in environment variables +-- \getenv libdir PG_LIBDIR +-- \getenv dlsuffix PG_DLSUFFIX +-- \set regresslib :libdir '/regress' :dlsuffix CREATE FUNCTION test_enc_setup() RETURNS void AS 'regresslib', 'test_enc_setup' @@ -295,6 +298,7 @@ insert into gb18030_inputs values ('\x666f6fcff3', 'valid'), ('\x666f6f8431a530', 'valid, no translation to UTF-8'), ('\x666f6f84309c38', 'valid, translates to UTF-8 by mapping function'), + ('\xa6d9', 'valid, changed from version 2000 to 2022'), ('\x666f6f84309c', 'incomplete char '), ('\x666f6f84309c0a', 'incomplete char, followed by newline '), ('\x666f6f84', 'incomplete char at end'), diff --git a/crates/squawk_parser/tests/data/regression_suite/copy.sql b/crates/squawk_parser/tests/data/regression_suite/copy.sql index 154d51fd..5eb639c0 100644 --- a/crates/squawk_parser/tests/data/regression_suite/copy.sql +++ b/crates/squawk_parser/tests/data/regression_suite/copy.sql @@ -3,6 +3,8 @@ -- -- directory paths are passed to us in environment variables +-- \getenv abs_srcdir PG_ABS_SRCDIR +-- \getenv abs_builddir PG_ABS_BUILDDIR --- test copying in CSV mode with various styles --- of embedded line ending characters @@ -17,6 +19,7 @@ insert into copytest values('Unix',E'abc\ndef',2); insert into copytest values('Mac',E'abc\rdef',3); insert into copytest values(E'esc\\ape',E'a\\r\\\r\\\n\\nb',4); +-- \set filename :abs_builddir '/results/copytest.csv' copy copytest to 'filename' csv; create temp table copytest2 (like copytest); @@ -37,6 +40,11 @@ select * from copytest except select * from copytest2; --- test unquoted \. as data inside CSV -- do not use copy out to export the data, as it would quote \. +-- \o :filename +-- \qecho line1 +-- \qecho '\\.' +-- \qecho line2 +-- \o -- get the data back in with copy truncate copytest2; copy copytest2(test) from 'filename' csv; @@ -49,10 +57,13 @@ copy copytest2(test) from stdin; -- line2 -- foo\. -- line3 +-- \. copy copytest2(test) from stdin; -- line4 -- line5 +-- \.foo -- line6 +-- \. select test from copytest2; @@ -67,6 +78,7 @@ copy copytest3 from stdin csv header; -- this is just a line full of junk that would error out if parsed -- 1,a,1 -- 2,b,2 +-- \. copy copytest3 to stdout csv header; @@ -78,9 +90,40 @@ copy copytest4 from stdin (header); -- this is just a line full of junk that would error out if parsed -- 1 a -- 2 b +-- \. copy copytest4 to stdout (header); +-- test multi-line header line feature + +create temp table copytest5 (c1 int); + +copy copytest5 from stdin (format csv, header 2); +-- this is a first header line. +-- this is a second header line. +-- 1 +-- 2 +-- \. +copy copytest5 to stdout (header); + +truncate copytest5; +copy copytest5 from stdin (format csv, header 4); +-- this is a first header line. +-- this is a second header line. +-- 1 +-- 2 +-- \. 
+select count(*) from copytest5; + +truncate copytest5; +copy copytest5 from stdin (format csv, header 5); +-- this is a first header line. +-- this is a second header line. +-- 1 +-- 2 +-- \. +select count(*) from copytest5; + -- test copy from with a partitioned table create table parted_copytest ( a int, @@ -100,6 +143,7 @@ insert into parted_copytest select x,1,'One' from generate_series(1,1000) x; insert into parted_copytest select x,2,'Two' from generate_series(1001,1010) x; insert into parted_copytest select x,1,'One' from generate_series(1011,1020) x; +-- \set filename :abs_builddir '/results/parted_copytest.csv' copy (select * from parted_copytest order by a) to 'filename'; truncate parted_copytest; @@ -141,6 +185,7 @@ drop trigger part_ins_trig on parted_copytest_a2; copy parted_copytest from stdin; -- 1 1 str1 -- 2 2 str2 +-- \. -- Ensure index entries were properly added during the copy. select * from parted_copytest where b = 1; @@ -200,9 +245,11 @@ copy tab_progress_reporting from stdin; -- sharon 25 (15,12) 1000 sam -- sam 30 (10,5) 2000 bill -- bill 20 (11,10) 1000 sharon +-- \. -- Generate COPY FROM report with FILE, with some excluded tuples. truncate tab_progress_reporting; +-- \set filename :abs_srcdir '/data/emp.data' copy tab_progress_reporting from 'filename' where (salary < 2000); @@ -211,6 +258,7 @@ copy tab_progress_reporting from stdin(on_error ignore); -- sharon x (15,12) x sam -- sharon 25 (15,12) 1000 sam -- sharon y (15,12) x sam +-- \. drop trigger check_after_tab_progress_reporting on tab_progress_reporting; drop function notice_after_tab_progress_reporting(); @@ -227,32 +275,40 @@ alter table header_copytest drop column c; alter table header_copytest add column c text; copy header_copytest to stdout with (header match); copy header_copytest from stdin with (header wrong_choice); --- works +-- -- works copy header_copytest from stdin with (header match); -- a b c -- 1 2 foo +-- \. copy header_copytest (c, a, b) from stdin with (header match); -- c a b -- bar 3 4 +-- \. copy header_copytest from stdin with (header match, format csv); -- a,b,c -- 5,6,baz +-- \. -- errors copy header_copytest (c, b, a) from stdin with (header match); -- a b c -- 1 2 foo +-- \. copy header_copytest from stdin with (header match); -- a b \N -- 1 2 foo +-- \. copy header_copytest from stdin with (header match); -- a b -- 1 2 +-- \. copy header_copytest from stdin with (header match); -- a b c d -- 1 2 foo bar +-- \. copy header_copytest from stdin with (header match); -- a b d -- 1 2 foo +-- \. SELECT * FROM header_copytest ORDER BY a; -- Drop an extra column, in the middle of the existing set. @@ -261,16 +317,20 @@ alter table header_copytest drop column b; copy header_copytest (c, a) from stdin with (header match); -- c a -- foo 7 +-- \. copy header_copytest (a, c) from stdin with (header match); -- a c -- 8 foo +-- \. -- errors copy header_copytest from stdin with (header match); -- a ........pg.dropped.2........ c -- 1 2 foo +-- \. copy header_copytest (a, c) from stdin with (header match); -- a c b -- 1 foo 2 +-- \. SELECT * FROM header_copytest ORDER BY a; drop table header_copytest; @@ -281,10 +341,13 @@ create temp table oversized_column_default ( col2 varchar(5)); -- normal COPY should work copy oversized_column_default from stdin; +-- \. -- error if the column is excluded copy oversized_column_default (col2) from stdin; +-- \. -- error if the DEFAULT option is given copy oversized_column_default from stdin (default ''); +-- \. 
drop table oversized_column_default; @@ -313,6 +376,7 @@ CREATE TABLE parted_si_p_odd PARTITION OF parted_si FOR VALUES IN (1); -- relation extension). See -- https://postgr.es/m/18130-7a86a7356a75209d%40postgresql.org -- https://postgr.es/m/257696.1695670946%40sss.pgh.pa.us +-- \set filename :abs_srcdir '/data/desc.data' COPY parted_si(id, data) FROM 'filename'; -- An earlier bug (see commit b1ecb9b3fcf) could end up using a buffer from @@ -330,6 +394,7 @@ create server copytest_server foreign data wrapper copytest_wrapper; create foreign table copytest_foreign_table (a int) server copytest_server; copy copytest_foreign_table from stdin (freeze); -- 1 +-- \. rollback; -- Tests for COPY TO with materialized views. @@ -340,3 +405,17 @@ COPY copytest_mv(id) TO stdout WITH (header); REFRESH MATERIALIZED VIEW copytest_mv; COPY copytest_mv(id) TO stdout WITH (header); DROP MATERIALIZED VIEW copytest_mv; + +-- Tests for COPY TO with partitioned tables. +-- The child table pp_2 has a different column order than the root table pp. +-- Check if COPY TO exports tuples as the root table's column order. +CREATE TABLE pp (id int,val int) PARTITION BY RANGE (id); +CREATE TABLE pp_1 (val int, id int) PARTITION BY RANGE (id); +CREATE TABLE pp_2 (id int, val int) PARTITION BY RANGE (id); +ALTER TABLE pp ATTACH PARTITION pp_1 FOR VALUES FROM (1) TO (5); +ALTER TABLE pp ATTACH PARTITION pp_2 FOR VALUES FROM (5) TO (10); +CREATE TABLE pp_15 PARTITION OF pp_1 FOR VALUES FROM (1) TO (5); +CREATE TABLE pp_510 PARTITION OF pp_2 FOR VALUES FROM (5) TO (10); +INSERT INTO pp SELECT g, 10 + g FROM generate_series(1,6) g; +COPY pp TO stdout(header); +DROP TABLE PP; diff --git a/crates/squawk_parser/tests/data/regression_suite/copy2.sql b/crates/squawk_parser/tests/data/regression_suite/copy2.sql index 256d7de3..5cbe243d 100644 --- a/crates/squawk_parser/tests/data/regression_suite/copy2.sql +++ b/crates/squawk_parser/tests/data/regression_suite/copy2.sql @@ -26,27 +26,143 @@ FOR EACH ROW EXECUTE PROCEDURE fn_x_after(); CREATE TRIGGER trg_x_before BEFORE INSERT ON x FOR EACH ROW EXECUTE PROCEDURE fn_x_before(); - - - +COPY x (a, b, c, d, e) from stdin; +-- 9999 \N \\N \NN \N +-- 10000 21 31 41 51 +-- \. + +COPY x (b, d) from stdin; +-- 1 test_1 +-- \. + +COPY x (b, d) from stdin; +-- 2 test_2 +-- 3 test_3 +-- 4 test_4 +-- 5 test_5 +-- \. + +COPY x (a, b, c, d, e) from stdin; +-- 10001 22 32 42 52 +-- 10002 23 33 43 53 +-- 10003 24 34 44 54 +-- 10004 25 35 45 55 +-- 10005 26 36 46 56 +-- \. 
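-- A note on the convention used above and throughout these COPY tests: in
-- psql, everything between "FROM stdin;" and a line containing only \. is raw
-- data rather than SQL, so the suite keeps those lines commented out,
-- presumably to keep the file parseable as plain SQL. Uncommented, a block
-- would look like this minimal sketch (hypothetical table t):
--
--   COPY t (a, b) FROM stdin (format csv);
--   1,one
--   2,two
--   \.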
-- non-existent column in column list: should fail --- redundant options --- incorrect options +COPY x (xyz) from stdin; +-- +-- -- redundant options +COPY x from stdin (format CSV, FORMAT CSV); +COPY x from stdin (freeze off, freeze on); +COPY x from stdin (delimiter ',', delimiter ','); +COPY x from stdin (null ' ', null ' '); +COPY x from stdin (header off, header on); +COPY x from stdin (quote ':', quote ':'); +COPY x from stdin (escape ':', escape ':'); +COPY x from stdin (force_quote (a), force_quote *); +COPY x from stdin (force_not_null (a), force_not_null (b)); +COPY x from stdin (force_null (a), force_null (b)); +COPY x from stdin (convert_selectively (a), convert_selectively (b)); +COPY x from stdin (encoding 'sql_ascii', encoding 'sql_ascii'); +COPY x from stdin (on_error ignore, on_error ignore); +COPY x from stdin (log_verbosity default, log_verbosity verbose); +-- +-- -- incorrect options +COPY x from stdin (format BINARY, delimiter ','); +COPY x from stdin (format BINARY, null 'x'); +COPY x from stdin (format BINARY, on_error ignore); +COPY x from stdin (on_error unsupported); +COPY x from stdin (format TEXT, force_quote(a)); +COPY x from stdin (format TEXT, force_quote *); +COPY x from stdin (format CSV, force_quote(a)); +COPY x from stdin (format CSV, force_quote *); +COPY x from stdin (format TEXT, force_not_null(a)); +COPY x from stdin (format TEXT, force_not_null *); +-- COPY x to stdout (format CSV, force_not_null(a)); +COPY x to stdout (format CSV, force_not_null *); +COPY x from stdin (format TEXT, force_null(a)); +COPY x from stdin (format TEXT, force_null *); +-- COPY x to stdout (format CSV, force_null(a)); +COPY x to stdout (format CSV, force_null *); +COPY x to stdout (format BINARY, on_error unsupported); +COPY x from stdin (log_verbosity unsupported); +COPY x from stdin with (reject_limit 1); +COPY x from stdin with (on_error ignore, reject_limit 0); +COPY x from stdin with (header -1); +COPY x from stdin with (header 2.5); +-- COPY x to stdout with (header 2); + -- too many columns in column list: should fail --- missing data: should fail +COPY x (a, b, c, d, e, d, c) from stdin; +-- +-- -- missing data: should fail +COPY x from stdin; +-- +-- \. +COPY x from stdin; +-- 2000 230 23 23 +-- \. +COPY x from stdin; +-- 2001 231 \N \N +-- \. -- extra data: should fail +COPY x from stdin; +-- 2002 232 40 50 60 70 80 +-- \. -- various COPY options: delimiters, oids, NULL string, encoding - - +COPY x (b, c, d, e) from stdin delimiter ',' null 'x'; +-- x,45,80,90 +-- x,\x,\\x,\\\x +-- x,\,,\\\,,\\ +-- \. + +COPY x from stdin WITH DELIMITER AS ';' NULL AS ''; +-- 3000;;c;; +-- \. + +COPY x from stdin WITH DELIMITER AS ':' NULL AS E'\\X' ENCODING 'sql_ascii'; +-- 4000:\X:C:\X:\X +-- 4001:1:empty:: +-- 4002:2:null:\X:\X +-- 4003:3:Backslash:\\:\\ +-- 4004:4:BackslashX:\\X:\\X +-- 4005:5:N:\N:\N +-- 4006:6:BackslashN:\\N:\\N +-- 4007:7:XX:\XX:\XX +-- 4008:8:Delimiter:\::\: +-- \. COPY x TO stdout WHERE a = 1; - - - --- check results of copy in +COPY x from stdin WHERE a = 50004; +-- 50003 24 34 44 54 +-- 50004 25 35 45 55 +-- 50005 26 36 46 56 +-- \. + +COPY x from stdin WHERE a > 60003; +-- 60001 22 32 42 52 +-- 60002 23 33 43 53 +-- 60003 24 34 44 54 +-- 60004 25 35 45 55 +-- 60005 26 36 46 56 +-- \. 
+ +COPY x from stdin WHERE f > 60003; +-- +COPY x from stdin WHERE a = max(x.b); +-- +COPY x from stdin WHERE a IN (SELECT 1 FROM x); +-- +COPY x from stdin WHERE a IN (generate_series(1,5)); +-- +COPY x from stdin WHERE a = row_number() over(b); +-- +-- +-- -- check results of copy in SELECT * FROM x; -- check copy out @@ -75,18 +191,30 @@ COPY y TO stdout (FORMAT CSV, QUOTE '''', DELIMITER '|'); COPY y TO stdout (FORMAT CSV, FORCE_QUOTE (col2), ESCAPE E'\\'); COPY y TO stdout (FORMAT CSV, FORCE_QUOTE *); +-- \copy y TO stdout (FORMAT CSV) +-- \copy y TO stdout (FORMAT CSV, QUOTE '''', DELIMITER '|') +-- \copy y TO stdout (FORMAT CSV, FORCE_QUOTE (col2), ESCAPE E'\\') +-- \copy y TO stdout (FORMAT CSV, FORCE_QUOTE *) --test that we read consecutive LFs properly CREATE TEMP TABLE testnl (a int, b text, c int); +COPY testnl FROM stdin CSV; +-- 1,"a field with two LFs +-- -- inside",2 +-- \. -- test end of copy marker CREATE TEMP TABLE testeoc (a text); +COPY testeoc FROM stdin CSV; +-- a\. +-- \.b -- c\.d -- "\." +-- \. COPY testeoc TO stdout CSV; @@ -97,45 +225,85 @@ INSERT INTO testnull VALUES (1, E'\\0'), (NULL, NULL); COPY testnull TO stdout WITH NULL AS E'\\0'; +COPY testnull FROM stdin WITH NULL AS E'\\0'; +-- 42 \\0 +-- \0 \0 +-- \. SELECT * FROM testnull; BEGIN; CREATE TABLE vistest (LIKE testeoc); +COPY vistest FROM stdin CSV; +-- a0 +-- b +-- \. COMMIT; SELECT * FROM vistest; BEGIN; TRUNCATE vistest; +COPY vistest FROM stdin CSV; +-- a1 +-- b +-- \. SELECT * FROM vistest; SAVEPOINT s1; TRUNCATE vistest; +COPY vistest FROM stdin CSV; +-- d1 +-- e +-- \. SELECT * FROM vistest; COMMIT; SELECT * FROM vistest; BEGIN; TRUNCATE vistest; +COPY vistest FROM stdin CSV FREEZE; +-- a2 +-- b +-- \. SELECT * FROM vistest; SAVEPOINT s1; TRUNCATE vistest; +COPY vistest FROM stdin CSV FREEZE; +-- d2 +-- e +-- \. SELECT * FROM vistest; COMMIT; SELECT * FROM vistest; BEGIN; TRUNCATE vistest; +COPY vistest FROM stdin CSV FREEZE; +-- x +-- y +-- \. SELECT * FROM vistest; COMMIT; TRUNCATE vistest; +COPY vistest FROM stdin CSV FREEZE; +-- p +-- g +-- \. BEGIN; TRUNCATE vistest; SAVEPOINT s1; +COPY vistest FROM stdin CSV FREEZE; +-- m +-- k +-- \. COMMIT; BEGIN; INSERT INTO vistest VALUES ('z'); SAVEPOINT s1; TRUNCATE vistest; ROLLBACK TO SAVEPOINT s1; +COPY vistest FROM stdin CSV FREEZE; +-- d3 +-- e +-- \. COMMIT; CREATE FUNCTION truncate_in_subxact() RETURNS VOID AS $$ @@ -149,6 +317,10 @@ $$ language plpgsql; BEGIN; INSERT INTO vistest VALUES ('z'); SELECT truncate_in_subxact(); +COPY vistest FROM stdin CSV FREEZE; +-- d4 +-- e +-- \. SELECT * FROM vistest; COMMIT; SELECT * FROM vistest; @@ -160,31 +332,66 @@ CREATE TEMP TABLE forcetest ( d TEXT, e TEXT ); +-- \pset null NULL -- should succeed with no effect ("b" remains an empty string, "c" remains NULL) BEGIN; +COPY forcetest (a, b, c) FROM STDIN WITH (FORMAT csv, FORCE_NOT_NULL(b), FORCE_NULL(c)); +-- 1,,"" +-- \. COMMIT; SELECT b, c FROM forcetest WHERE a = 1; -- should succeed, FORCE_NULL and FORCE_NOT_NULL can be both specified BEGIN; +COPY forcetest (a, b, c, d) FROM STDIN WITH (FORMAT csv, FORCE_NOT_NULL(c,d), FORCE_NULL(c,d)); +-- 2,'a',,"" +-- \. COMMIT; SELECT c, d FROM forcetest WHERE a = 2; -- should fail with not-null constraint violation BEGIN; +COPY forcetest (a, b, c) FROM STDIN WITH (FORMAT csv, FORCE_NULL(b), FORCE_NOT_NULL(c)); +-- 3,,"" +-- \. 
ROLLBACK; -- should fail with "not referenced by COPY" error BEGIN; +COPY forcetest (d, e) FROM STDIN WITH (FORMAT csv, FORCE_NOT_NULL(b)); +-- ROLLBACK; +-- should fail with "not referenced by COPY" error +BEGIN; +COPY forcetest (d, e) FROM STDIN WITH (FORMAT csv, FORCE_NULL(b)); +-- ROLLBACK; +-- should succeed with no effect ("b" remains an empty string, "c" remains NULL) +BEGIN; +COPY forcetest (a, b, c) FROM STDIN WITH (FORMAT csv, FORCE_NOT_NULL *, FORCE_NULL *); +-- 4,,"" +-- \. COMMIT; SELECT b, c FROM forcetest WHERE a = 4; -- should succeed with effect ("b" remains an empty string) BEGIN; +COPY forcetest (a, b, c) FROM STDIN WITH (FORMAT csv, FORCE_NOT_NULL *); +-- 5,,"" +-- \. COMMIT; SELECT b, c FROM forcetest WHERE a = 5; -- should succeed with effect ("c" remains NULL) BEGIN; +COPY forcetest (a, b, c) FROM STDIN WITH (FORMAT csv, FORCE_NULL *); +-- 6,"b","" +-- \. COMMIT; SELECT b, c FROM forcetest WHERE a = 6; -- should fail with "conflicting or redundant options" error BEGIN; +COPY forcetest (a, b, c) FROM STDIN WITH (FORMAT csv, FORCE_NOT_NULL *, FORCE_NOT_NULL(b)); +-- ROLLBACK; +-- should fail with "conflicting or redundant options" error +BEGIN; +COPY forcetest (a, b, c) FROM STDIN WITH (FORMAT csv, FORCE_NULL *, FORCE_NULL(b)); +-- ROLLBACK; + +-- \pset null '' -- test case with whole-row Var in a check constraint create table check_con_tbl (f1 int); @@ -194,10 +401,14 @@ begin return $1.f1 > 0; end $$ language plpgsql immutable; alter table check_con_tbl add check (check_con_function(check_con_tbl.*)); +-- \d+ check_con_tbl copy check_con_tbl from stdin; -- 1 +-- \N +-- \. copy check_con_tbl from stdin; -- 0 +-- \. select * from check_con_tbl; -- test with RLS enabled. @@ -205,6 +416,12 @@ CREATE ROLE regress_rls_copy_user; CREATE ROLE regress_rls_copy_user_colperms; CREATE TABLE rls_t1 (a int, b int, c int); +COPY rls_t1 (a, b, c) from stdin; +-- 1 4 1 +-- 2 3 2 +-- 3 2 3 +-- 4 1 4 +-- \. CREATE POLICY p1 ON rls_t1 FOR SELECT USING (a % 2 = 0); ALTER TABLE rls_t1 ENABLE ROW LEVEL SECURITY; @@ -258,6 +475,9 @@ RESET SESSION AUTHORIZATION; CREATE TABLE instead_of_insert_tbl(id serial, name text); CREATE VIEW instead_of_insert_tbl_view AS SELECT ''::text AS str; +COPY instead_of_insert_tbl_view FROM stdin; -- fail +-- test1 +-- \. CREATE FUNCTION fun_instead_of_insert_tbl() RETURNS trigger AS $$ BEGIN @@ -269,6 +489,9 @@ CREATE TRIGGER trig_instead_of_insert_tbl_view INSTEAD OF INSERT ON instead_of_insert_tbl_view FOR EACH ROW EXECUTE PROCEDURE fun_instead_of_insert_tbl(); +COPY instead_of_insert_tbl_view FROM stdin; +-- test1 +-- \. SELECT * FROM instead_of_insert_tbl; @@ -281,26 +504,53 @@ CREATE TRIGGER trig_instead_of_insert_tbl_view_2 INSTEAD OF INSERT ON instead_of_insert_tbl_view_2 FOR EACH ROW EXECUTE PROCEDURE fun_instead_of_insert_tbl(); +COPY instead_of_insert_tbl_view_2 FROM stdin; +-- test1 +-- \. SELECT * FROM instead_of_insert_tbl; COMMIT; -- tests for on_error option CREATE TABLE check_ign_err (n int, m int[], k int); +COPY check_ign_err FROM STDIN WITH (on_error stop); +-- 1 {1} 1 +-- a {2} 2 +-- 3 {3} 3333333333 +-- 4 {a, 4} 4 +-- -- 5 {5} 5 +-- \. --- -- want context for notices +-- want context for notices +-- \set SHOW_CONTEXT always +COPY check_ign_err FROM STDIN WITH (on_error ignore, log_verbosity verbose); +-- 1 {1} 1 +-- a {2} 2 +-- 3 {3} 3333333333 +-- 4 {a, 4} 4 +-- -- 5 {5} 5 -- 6 a -- 7 {7} a -- 8 {8} 8 +-- \. 
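-- For reference: on_error ignore only softens data type conversion errors;
-- rows that fail to convert are skipped and counted, while structural problems
-- such as missing or extra columns still abort the command (exercised below).
-- With log_verbosity verbose, one NOTICE is emitted per skipped row. A minimal
-- sketch, assuming a hypothetical table t(n int):
--
--   COPY t FROM stdin WITH (on_error ignore, log_verbosity verbose);
--   1
--   not_a_number
--   \.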
-- tests for on_error option with log_verbosity and null constraint via domain CREATE DOMAIN dcheck_ign_err2 varchar(15) NOT NULL; CREATE TABLE check_ign_err2 (n int, m int[], k int, l dcheck_ign_err2); +COPY check_ign_err2 FROM STDIN WITH (on_error ignore, log_verbosity verbose); +-- 1 {1} 1 'foo' +-- 2 {2} 2 \N +-- \. +COPY check_ign_err2 FROM STDIN WITH (on_error ignore, log_verbosity silent); +-- 3 {3} 3 'bar' +-- 4 {4} 4 \N +-- \. -- reset context choice +-- \set SHOW_CONTEXT errors SELECT * FROM check_ign_err; @@ -308,15 +558,38 @@ SELECT * FROM check_ign_err2; -- test datatype error that can't be handled as soft: should fail CREATE TABLE hard_err(foo widget); +COPY hard_err FROM STDIN WITH (on_error ignore); +-- 1 +-- \. -- test missing data: should fail +COPY check_ign_err FROM STDIN WITH (on_error ignore); +-- 1 {1} +-- \. -- test extra data: should fail +COPY check_ign_err FROM STDIN WITH (on_error ignore); +-- 1 {1} 3 abc +-- \. -- tests for reject_limit option +COPY check_ign_err FROM STDIN WITH (on_error ignore, reject_limit 3); +-- 6 {6} 6 +-- a {7} 7 +-- 8 {8} 8888888888 +-- 9 {a, 9} 9 +-- -- 10 {10} 10 - +-- \. + +COPY check_ign_err FROM STDIN WITH (on_error ignore, reject_limit 4); +-- 6 {6} 6 +-- a {7} 7 +-- 8 {8} 8888888888 +-- 9 {a, 9} 9 +-- -- 10 {10} 10 +-- \. -- clean up DROP TABLE forcetest; @@ -351,6 +624,7 @@ create temp table copy_default ( copy copy_default from stdin; -- 1 value '2022-07-04' -- 2 \D '2022-07-05' +-- \. select id, text_value, ts_value from copy_default; @@ -359,6 +633,7 @@ truncate copy_default; copy copy_default from stdin with (format csv); -- 1,value,2022-07-04 -- 2,\D,2022-07-05 +-- \. select id, text_value, ts_value from copy_default; @@ -366,32 +641,37 @@ truncate copy_default; -- DEFAULT cannot be used in binary mode copy copy_default from stdin with (format binary, default '\D'); - --- DEFAULT cannot be new line nor carriage return +-- +-- -- DEFAULT cannot be new line nor carriage return copy copy_default from stdin with (default E'\n'); copy copy_default from stdin with (default E'\r'); - --- DELIMITER cannot appear in DEFAULT spec +-- +-- -- DELIMITER cannot appear in DEFAULT spec copy copy_default from stdin with (delimiter ';', default 'test;test'); - --- CSV quote cannot appear in DEFAULT spec +-- +-- -- CSV quote cannot appear in DEFAULT spec copy copy_default from stdin with (format csv, quote '"', default 'test"test'); - --- NULL and DEFAULT spec must be different +-- +-- -- NULL and DEFAULT spec must be different copy copy_default from stdin with (default '\N'); - --- cannot use DEFAULT marker in column that has no DEFAULT value +-- +-- -- cannot use DEFAULT marker in column that has no DEFAULT value copy copy_default from stdin with (default '\D'); +-- \D value '2022-07-04' -- 2 \D '2022-07-05' +-- \. copy copy_default from stdin with (format csv, default '\D'); +-- \D,value,2022-07-04 -- 2,\D,2022-07-05 +-- \. -- The DEFAULT marker must be unquoted and unescaped or it's not recognized copy copy_default from stdin with (default '\D'); -- 1 \D '2022-07-04' -- 2 \\D '2022-07-04' -- 3 "\D" '2022-07-04' +-- \. select id, text_value, ts_value from copy_default; @@ -401,6 +681,7 @@ copy copy_default from stdin with (format csv, default '\D'); -- 1,\D,2022-07-04 -- 2,\\D,2022-07-04 -- 3,"\D",2022-07-04 +-- \. select id, text_value, ts_value from copy_default; @@ -411,6 +692,7 @@ copy copy_default from stdin with (default '\D'); -- 1 value '2022-07-04' -- 2 \D '2022-07-03' -- 3 \D \D +-- \. 
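+-- Editorial note (hedged): the copy_default tests exercise COPY's DEFAULT
+-- option. With WITH (DEFAULT '\D'), an unquoted, unescaped \D field is
+-- replaced by the target column's DEFAULT expression; without the option, as
+-- in the statement just above, \D is read back as ordinary data. Sketch:
+--   COPY copy_default FROM STDIN WITH (DEFAULT '\D');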
select id, text_value, ts_value from copy_default; @@ -420,6 +702,7 @@ copy copy_default from stdin with (format csv, default '\D'); -- 1,value,2022-07-04 -- 2,\D,2022-07-03 -- 3,\D,\D +-- \. select id, text_value, ts_value from copy_default; diff --git a/crates/squawk_parser/tests/data/regression_suite/copydml.sql b/crates/squawk_parser/tests/data/regression_suite/copydml.sql index b3d3bec3..23c4a4f7 100644 --- a/crates/squawk_parser/tests/data/regression_suite/copydml.sql +++ b/crates/squawk_parser/tests/data/regression_suite/copydml.sql @@ -18,6 +18,9 @@ copy (delete from copydml_test where t = 'g' returning id) to stdout; -- -- Test \copy (insert/update/delete ...) -- +-- \copy (insert into copydml_test (t) values ('f') returning id) to stdout; +-- \copy (update copydml_test set t = 'g' where t = 'f' returning id) to stdout; +-- \copy (delete from copydml_test where t = 'g' returning id) to stdout; -- Error cases copy (insert into copydml_test default values) to stdout; diff --git a/crates/squawk_parser/tests/data/regression_suite/copyencoding.sql b/crates/squawk_parser/tests/data/regression_suite/copyencoding.sql index 4d2a184c..c8a9f290 100644 --- a/crates/squawk_parser/tests/data/regression_suite/copyencoding.sql +++ b/crates/squawk_parser/tests/data/regression_suite/copyencoding.sql @@ -4,10 +4,15 @@ -- skip test if not UTF8 server encoding SELECT getdatabaseencoding() <> 'UTF8' - AS skip_test ; + AS skip_test /* \gset */; +-- \if :skip_test +-- \quit +-- \endif -- directory paths are passed to us in environment variables +-- \getenv abs_builddir PG_ABS_BUILDDIR +-- \set utf8_csv :abs_builddir '/results/copyencoding_utf8.csv' CREATE TABLE copy_encoding_tab (t text); diff --git a/crates/squawk_parser/tests/data/regression_suite/copyselect.sql b/crates/squawk_parser/tests/data/regression_suite/copyselect.sql index b44a3020..45ce1afd 100644 --- a/crates/squawk_parser/tests/data/regression_suite/copyselect.sql +++ b/crates/squawk_parser/tests/data/regression_suite/copyselect.sql @@ -42,7 +42,7 @@ copy (select t into temp test3 from test1 where id=3) to stdout; -- This should fail -- copy (select * from test1) from stdin; --- +-- -- -- This should fail -- -- copy (select * from test1) (t,id) to stdout; @@ -65,12 +65,15 @@ copy (select t from test1 where id = 1) to stdout csv header force quote t; -- -- Test psql builtins, plain table -- +-- \copy test1 to stdout -- -- This should fail -- +-- \copy v_test1 to stdout -- -- Test \copy (select ...) -- +-- \copy (select "id",'id','id""'||t,(id + 1)*id,t,"test1"."t" from test1 where id=3) to stdout -- -- Drop everything -- @@ -86,6 +89,8 @@ drop table test1; create table test3 (c int); -- select 0\; copy test3 from stdin\; copy test3 from stdin\; select 1; -- 0 1 -- 1 +-- \. -- 2 +-- \. 
select * from test3; drop table test3; diff --git a/crates/squawk_parser/tests/data/regression_suite/create_aggregate.sql b/crates/squawk_parser/tests/data/regression_suite/create_aggregate.sql index 6cbe5779..01c09ce2 100644 --- a/crates/squawk_parser/tests/data/regression_suite/create_aggregate.sql +++ b/crates/squawk_parser/tests/data/regression_suite/create_aggregate.sql @@ -135,6 +135,7 @@ alter aggregate my_percentile_disc(float8 ORDER BY anyelement) alter aggregate my_rank(VARIADIC "any" ORDER BY VARIADIC "any") rename to test_rank; +-- \da test_* -- moving-aggregate options diff --git a/crates/squawk_parser/tests/data/regression_suite/create_function_c.sql b/crates/squawk_parser/tests/data/regression_suite/create_function_c.sql index c9cce883..af466b01 100644 --- a/crates/squawk_parser/tests/data/regression_suite/create_function_c.sql +++ b/crates/squawk_parser/tests/data/regression_suite/create_function_c.sql @@ -7,7 +7,10 @@ -- test script that needs them. All that remains here is error cases. -- directory path and dlsuffix are passed to us in environment variables +-- \getenv libdir PG_LIBDIR +-- \getenv dlsuffix PG_DLSUFFIX +-- \set regresslib :libdir '/regress' :dlsuffix -- -- Check LOAD command. (The alternative of implicitly loading the library @@ -22,8 +25,10 @@ CREATE FUNCTION test1 (int) RETURNS int LANGUAGE C -- To produce stable regression test output, we have to filter the name -- of the regresslib file out of the error message in this test. +-- \set VERBOSITY sqlstate CREATE FUNCTION test1 (int) RETURNS int LANGUAGE C AS 'regresslib', 'nosuchsymbol'; +-- \set VERBOSITY default SELECT regexp_replace('LAST_ERROR_MESSAGE', 'file ".*"', 'file "..."'); CREATE FUNCTION test1 (int) RETURNS int LANGUAGE internal diff --git a/crates/squawk_parser/tests/data/regression_suite/create_function_sql.sql b/crates/squawk_parser/tests/data/regression_suite/create_function_sql.sql index 6d1c102d..4543273f 100644 --- a/crates/squawk_parser/tests/data/regression_suite/create_function_sql.sql +++ b/crates/squawk_parser/tests/data/regression_suite/create_function_sql.sql @@ -241,6 +241,17 @@ SELECT functest_S_14(); DROP TABLE functest3 CASCADE; +-- Check reporting of temporary-object dependencies within SQL-standard body +-- (tests elsewhere already cover dependencies on arg and result types) +CREATE TEMP SEQUENCE mytempseq; + +CREATE FUNCTION functest_tempseq() RETURNS int + RETURN nextval('mytempseq'); + +-- This discards mytempseq and therefore functest_tempseq(). If it fails to, +-- the function will appear in the information_schema tests below. +DISCARD TEMP; + -- information_schema tests @@ -432,6 +443,23 @@ $$ SELECT array_append($1, $2) || array_append($1, $2) $$; SELECT double_append(array_append(ARRAY[q1], q2), q3) FROM (VALUES(1,2,3), (4,5,6)) v(q1,q2,q3); +-- Check that we can re-use a SQLFunctionCache after a run-time error. + +-- This function will fail with zero-divide at run time (not plan time). +CREATE FUNCTION part_hashint4_error(value int4, seed int8) RETURNS int8 +LANGUAGE SQL STRICT IMMUTABLE PARALLEL SAFE AS +$$ SELECT value + seed + random()::int/0 $$; + +-- Put it into an operator class so that FmgrInfo will be cached in relcache. 
+CREATE OPERATOR CLASS part_test_int4_ops_bad FOR TYPE int4 USING hash AS + FUNCTION 2 part_hashint4_error(int4, int8); + +CREATE TABLE pt(i int) PARTITION BY hash (i part_test_int4_ops_bad); +CREATE TABLE p1 PARTITION OF pt FOR VALUES WITH (modulus 4, remainder 0); + +INSERT INTO pt VALUES (1); +INSERT INTO pt VALUES (1); + -- Things that shouldn't work: CREATE FUNCTION test1 (int) RETURNS int LANGUAGE SQL diff --git a/crates/squawk_parser/tests/data/regression_suite/create_index.sql b/crates/squawk_parser/tests/data/regression_suite/create_index.sql index 006c010d..0fafe539 100644 --- a/crates/squawk_parser/tests/data/regression_suite/create_index.sql +++ b/crates/squawk_parser/tests/data/regression_suite/create_index.sql @@ -4,6 +4,7 @@ -- -- directory paths are passed to us in environment variables +-- \getenv abs_srcdir PG_ABS_SRCDIR -- -- BTREE @@ -69,6 +70,7 @@ CREATE TABLE fast_emp4000 ( home_base box ); +-- \set filename :abs_srcdir '/data/rect.data' COPY slow_emp4000 FROM 'filename'; INSERT INTO fast_emp4000 SELECT * FROM slow_emp4000; @@ -266,6 +268,7 @@ CREATE TABLE array_index_op_test ( t text[] ); +-- \set filename :abs_srcdir '/data/array.data' COPY array_index_op_test FROM 'filename'; ANALYZE array_index_op_test; @@ -351,6 +354,7 @@ DROP TABLE array_gin_test; -- CREATE INDEX gin_relopts_test ON array_index_op_test USING gin (i) WITH (FASTUPDATE=on, GIN_PENDING_LIST_LIMIT=128); +-- \d+ gin_relopts_test -- -- HASH @@ -406,6 +410,9 @@ DELETE FROM unique_tbl WHERE t = 'seven'; CREATE UNIQUE INDEX unique_idx4 ON unique_tbl (i) NULLS NOT DISTINCT; -- ok now +-- \d unique_tbl +-- \d unique_idx3 +-- \d unique_idx4 SELECT pg_get_indexdef('unique_idx3'::regclass); SELECT pg_get_indexdef('unique_idx4'::regclass); @@ -427,6 +434,8 @@ INSERT INTO func_index_heap VALUES('ABCD', 'EF'); INSERT INTO func_index_heap VALUES('QWERTY'); -- while we're here, see that the metadata looks sane +-- \d func_index_heap +-- \d func_index_index -- @@ -445,6 +454,8 @@ INSERT INTO func_index_heap VALUES('ABCD', 'EF'); INSERT INTO func_index_heap VALUES('QWERTY'); -- while we're here, see that the metadata looks sane +-- \d func_index_heap +-- \d func_index_index -- this should fail because of unsafe column type (anonymous record) create index on func_index_heap ((f1 || f2), (row(f1, f2))); @@ -520,7 +531,9 @@ VACUUM FULL concur_heap; REINDEX TABLE concur_heap; DELETE FROM concur_heap WHERE f1 = 'b'; VACUUM FULL concur_heap; +-- \d concur_heap REINDEX TABLE concur_heap; +-- \d concur_heap -- Temporary tables with concurrent builds and on-commit actions -- CONCURRENTLY used with CREATE INDEX and DROP INDEX is ignored. 
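+-- Editorial note: for temporary relations CONCURRENTLY is accepted but the
+-- build proceeds non-concurrently, since no other session can access a temp
+-- table anyway; e.g. (names illustrative):
+--   CREATE TEMP TABLE concur_temp (f1 int);
+--   CREATE INDEX CONCURRENTLY concur_temp_ind ON concur_temp (f1);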
@@ -566,6 +579,7 @@ DROP INDEX CONCURRENTLY "concur_index5"; DROP INDEX CONCURRENTLY "concur_index1"; DROP INDEX CONCURRENTLY "concur_heap_expr_idx"; +-- \d concur_heap DROP TABLE concur_heap; @@ -582,12 +596,16 @@ INSERT INTO cwi_test VALUES(1, 2), (3, 4), (5, 6); CREATE UNIQUE INDEX cwi_uniq_idx ON cwi_test(a , b); ALTER TABLE cwi_test ADD primary key USING INDEX cwi_uniq_idx; +-- \d cwi_test +-- \d cwi_uniq_idx CREATE UNIQUE INDEX cwi_uniq2_idx ON cwi_test(b , a); ALTER TABLE cwi_test DROP CONSTRAINT cwi_uniq_idx, ADD CONSTRAINT cwi_replaced_pkey PRIMARY KEY USING INDEX cwi_uniq2_idx; +-- \d cwi_test +-- \d cwi_replaced_pkey DROP INDEX cwi_replaced_pkey; -- Should fail; a constraint depends on it @@ -617,7 +635,7 @@ DROP TABLE cwi_test; CREATE TABLE syscol_table (a INT); -- System columns cannot be indexed -CREATE INDEX ON syscolcol_table (ctid); +CREATE INDEX ON syscol_table (ctid); -- nor used in expressions CREATE INDEX ON syscol_table ((ctid >= '(1000,0)')); @@ -1015,7 +1033,9 @@ explain (costs off) -- REINDEX (VERBOSE) -- CREATE TABLE reindex_verbose(id integer primary key); +-- \set VERBOSITY terse \\ -- suppress machine-dependent details REINDEX (VERBOSE) TABLE reindex_verbose; +-- \set VERBOSITY default DROP TABLE reindex_verbose; -- @@ -1112,6 +1132,7 @@ CREATE INDEX concur_appclass_ind on concur_appclass_tab CREATE INDEX concur_appclass_ind_2 on concur_appclass_tab USING gist (k tsvector_ops (siglen='300'), j tsvector_ops); REINDEX TABLE CONCURRENTLY concur_appclass_tab; +-- \d concur_appclass_tab DROP TABLE concur_appclass_tab; -- Partitions @@ -1281,6 +1302,7 @@ REINDEX SCHEMA CONCURRENTLY pg_catalog; REINDEX DATABASE not_current_database; -- Check the relation status, there should not be invalid indexes +-- \d concur_reindex_tab DROP MATERIALIZED VIEW concur_reindex_matview; DROP TABLE concur_reindex_tab, concur_reindex_tab2, concur_reindex_tab3; @@ -1292,13 +1314,16 @@ CREATE UNIQUE INDEX CONCURRENTLY concur_reindex_ind5 ON concur_reindex_tab4 (c1) -- Reindexing concurrently this index fails with the same failure. -- The extra index created is itself invalid, and can be dropped. REINDEX INDEX CONCURRENTLY concur_reindex_ind5; +-- \d concur_reindex_tab4 DROP INDEX concur_reindex_ind5_ccnew; -- This makes the previous failure go away, so the index can become valid. DELETE FROM concur_reindex_tab4 WHERE c1 = 1; -- The invalid index is not processed when running REINDEX TABLE. REINDEX TABLE CONCURRENTLY concur_reindex_tab4; +-- \d concur_reindex_tab4 -- But it is fixed with REINDEX INDEX. REINDEX INDEX CONCURRENTLY concur_reindex_ind5; +-- \d concur_reindex_tab4 DROP TABLE concur_reindex_tab4; -- Check handling of indexes with expressions and predicates. 
The @@ -1381,8 +1406,8 @@ CREATE TABLE reindex_temp_before AS SELECT oid, relname, relfilenode, relkind, reltoastrelid FROM pg_class WHERE relname IN ('concur_temp_ind_1', 'concur_temp_ind_2'); -SELECT pg_my_temp_schema()::regnamespace as temp_schema_name ; -REINDEX SCHEMA CONCURRENTLY "temp_schema_name"; +SELECT pg_my_temp_schema()::regnamespace as temp_schema_name /* \gset */; +REINDEX SCHEMA CONCURRENTLY temp_schema_name; SELECT b.relname, b.relkind, CASE WHEN a.relfilenode = b.relfilenode THEN 'relfilenode is unchanged' diff --git a/crates/squawk_parser/tests/data/regression_suite/create_operator.sql b/crates/squawk_parser/tests/data/regression_suite/create_operator.sql index 1f6080af..d993ea16 100644 --- a/crates/squawk_parser/tests/data/regression_suite/create_operator.sql +++ b/crates/squawk_parser/tests/data/regression_suite/create_operator.sql @@ -22,6 +22,13 @@ CREATE OPERATOR #%# ( -- Test operator created above SELECT @#@ 24; +-- Test error cases +select @@##@@ 24; -- no such operator +set search_path = pg_catalog; +select @#@ 24; -- wrong schema +reset search_path; +select @#@ 24.0; -- wrong data type + -- Test comments COMMENT ON OPERATOR ###### (NONE, int4) IS 'bad prefix'; COMMENT ON OPERATOR ###### (int4, NONE) IS 'bad postfix'; @@ -86,21 +93,21 @@ ROLLBACK; -- Should fail. SETOF type functions not allowed as argument (testing leftarg) --- BEGIN TRANSACTION; --- CREATE OPERATOR #*# ( --- leftarg = SETOF int8, --- procedure = factorial --- ); --- ROLLBACK; +BEGIN TRANSACTION; +CREATE OPERATOR #*# ( + leftarg = SETOF int8, + procedure = factorial +); +ROLLBACK; -- Should fail. SETOF type functions not allowed as argument (testing rightarg) --- BEGIN TRANSACTION; --- CREATE OPERATOR #*# ( --- rightarg = SETOF int8, --- procedure = factorial --- ); --- ROLLBACK; +BEGIN TRANSACTION; +CREATE OPERATOR #*# ( + rightarg = SETOF int8, + procedure = factorial +); +ROLLBACK; -- Should work. 
Sample text-book case diff --git a/crates/squawk_parser/tests/data/regression_suite/create_procedure.sql b/crates/squawk_parser/tests/data/regression_suite/create_procedure.sql index 95781da7..4e8c5f02 100644 --- a/crates/squawk_parser/tests/data/regression_suite/create_procedure.sql +++ b/crates/squawk_parser/tests/data/regression_suite/create_procedure.sql @@ -11,11 +11,14 @@ AS $$ INSERT INTO cp_test VALUES (1, x); $$; +-- \df ptest1 SELECT pg_get_functiondef('ptest1'::regproc); -- show only normal functions +-- \dfn public.*test*1 -- show only procedures +-- \dfp public.*test*1 SELECT ptest1('x'); -- error CALL ptest1('a'); -- ok @@ -32,6 +35,7 @@ BEGIN ATOMIC INSERT INTO cp_test VALUES (1, x); END; +-- \df ptest1s SELECT pg_get_functiondef('ptest1s'::regproc); CALL ptest1s('b'); @@ -171,6 +175,7 @@ CREATE PROCEDURE ptest8(x text) BEGIN ATOMIC END; +-- \df ptest8 SELECT pg_get_functiondef('ptest8'::regproc); CALL ptest8(''); @@ -211,15 +216,18 @@ CALL ptest11(null, 11, 12, 13); CREATE PROCEDURE ptest10(IN a int, IN b int, IN c int) LANGUAGE SQL AS $$ SELECT a + b - c $$; +-- \df ptest10 drop procedure ptest10; -- fail drop procedure ptest10(int, int, int); -- fail begin; drop procedure ptest10(out int, int, int); +-- \df ptest10 drop procedure ptest10(int, int, int); -- now this would work rollback; begin; drop procedure ptest10(in int, int, int); +-- \df ptest10 drop procedure ptest10(int, int, int); -- now this would work rollback; diff --git a/crates/squawk_parser/tests/data/regression_suite/create_schema.sql b/crates/squawk_parser/tests/data/regression_suite/create_schema.sql index e6ea4a5c..140d143b 100644 --- a/crates/squawk_parser/tests/data/regression_suite/create_schema.sql +++ b/crates/squawk_parser/tests/data/regression_suite/create_schema.sql @@ -51,15 +51,18 @@ RESET ROLE; -- The schema created matches the role name. CREATE SCHEMA AUTHORIZATION regress_create_schema_role CREATE TABLE regress_create_schema_role.tab (id int); +-- \d regress_create_schema_role.tab DROP SCHEMA regress_create_schema_role CASCADE; -- Again, with a different role specification and no schema names. SET ROLE regress_create_schema_role; CREATE SCHEMA AUTHORIZATION CURRENT_ROLE CREATE TABLE regress_create_schema_role.tab (id int); +-- \d regress_create_schema_role.tab DROP SCHEMA regress_create_schema_role CASCADE; -- Again, with a schema name and a role specification. 
CREATE SCHEMA regress_schema_1 AUTHORIZATION CURRENT_ROLE CREATE TABLE regress_schema_1.tab (id int); +-- \d regress_schema_1.tab DROP SCHEMA regress_schema_1 CASCADE; RESET ROLE; diff --git a/crates/squawk_parser/tests/data/regression_suite/create_table.sql b/crates/squawk_parser/tests/data/regression_suite/create_table.sql index 915c1c1c..81477546 100644 --- a/crates/squawk_parser/tests/data/regression_suite/create_table.sql +++ b/crates/squawk_parser/tests/data/regression_suite/create_table.sql @@ -51,8 +51,11 @@ DEALLOCATE select1; -- create an extra wide table to test for issues related to that -- (temporarily hide query, to avoid the long CREATE TABLE stmt) +-- \set ECHO none SELECT 'CREATE TABLE extra_wide_table(firstc text, '|| array_to_string(array_agg('c'||i||' bool'),',')||', lastc text);' FROM generate_series(1, 1100) g(i); +-- \gexec +-- \set ECHO all INSERT INTO extra_wide_table(firstc, lastc) VALUES('first col', 'last col'); SELECT firstc, lastc FROM extra_wide_table; @@ -65,6 +68,14 @@ CREATE TABLE withoid() WITH (oids = true); CREATE TEMP TABLE withoutoid() WITHOUT OIDS; DROP TABLE withoutoid; CREATE TEMP TABLE withoutoid() WITH (oids = false); DROP TABLE withoutoid; +-- temporary tables are ignored by pg_filenode_relation(). +CREATE TEMP TABLE relation_filenode_check(c1 int); +SELECT relpersistence, + pg_filenode_relation (reltablespace, pg_relation_filenode(oid)) + FROM pg_class + WHERE relname = 'relation_filenode_check'; +DROP TABLE relation_filenode_check; + -- check restriction with default expressions -- invalid use of column reference in default expressions CREATE TABLE default_expr_column (id int DEFAULT (id)); @@ -215,9 +226,12 @@ CREATE TABLE partitioned2 ( CREATE TABLE fail () INHERITS (partitioned2); -- Partition key in describe output +-- \d partitioned +-- \d+ partitioned2 INSERT INTO partitioned2 VALUES (1, 'hello'); CREATE TABLE part2_1 PARTITION OF partitioned2 FOR VALUES FROM (-1, 'aaaaa') TO (100, 'ccccc'); +-- \d+ part2_1 DROP TABLE partitioned, partitioned2; @@ -241,6 +255,7 @@ create table partitioned2 partition of partitioned for values in ('(2,4)'); explain (costs off) select * from partitioned where partitioned = '(1,2)'::partitioned; +-- \d+ partitioned1 drop table partitioned; -- check that dependencies of partition columns are handled correctly @@ -289,6 +304,7 @@ CREATE TABLE part_p1 PARTITION OF list_parted FOR VALUES IN ('1'); CREATE TABLE part_p2 PARTITION OF list_parted FOR VALUES IN (2); CREATE TABLE part_p3 PARTITION OF list_parted FOR VALUES IN ((2+1)); CREATE TABLE part_null PARTITION OF list_parted FOR VALUES IN (null); +-- \d+ list_parted -- forbidden expressions for partition bound with list partitioned table CREATE TABLE part_bogus_expr_fail PARTITION OF list_parted FOR VALUES IN (somename); @@ -302,7 +318,7 @@ CREATE TABLE part_bogus_expr_fail PARTITION OF list_parted FOR VALUES IN (genera CREATE TABLE part_bogus_expr_fail PARTITION OF list_parted FOR VALUES IN ((1+1) collate "POSIX"); -- syntax does not allow empty list of values for list partitions --- CREATE TABLE fail_part PARTITION OF list_parted FOR VALUES IN (); +CREATE TABLE fail_part PARTITION OF list_parted FOR VALUES IN (); -- trying to specify range for list partitioned table CREATE TABLE fail_part PARTITION OF list_parted FOR VALUES FROM (1) TO (2); -- trying to specify modulus and remainder for list partitioned table @@ -552,6 +568,7 @@ create table parted_notnull_inh_test (a int default 1, b int not null default 0) create table parted_notnull_inh_test1 partition of 
parted_notnull_inh_test (a not null, b default 1) for values in (1); insert into parted_notnull_inh_test (b) values (null); -- note that while b's default is overridden, a's default is preserved +-- \d parted_notnull_inh_test1 drop table parted_notnull_inh_test; -- check that collations are assigned in partition bound expressions @@ -589,23 +606,32 @@ create table test_part_coll_cast2 partition of test_part_coll_posix for values f drop table test_part_coll_posix; -- Partition bound in describe output +-- \d+ part_b -- Both partition bound and partition key in describe output +-- \d+ part_c -- a level-2 partition's constraint will include the parent's expressions +-- \d+ part_c_1_10 -- Show partition count in the parent's describe output -- Tempted to include \d+ output listing partitions with bound info but -- output could vary depending on the order in which partition oids are -- returned. +-- \d parted +-- \d hash_parted -- check that we get the expected partition constraints CREATE TABLE range_parted4 (a int, b int, c int) PARTITION BY RANGE (abs(a), abs(b), c); CREATE TABLE unbounded_range_part PARTITION OF range_parted4 FOR VALUES FROM (MINVALUE, MINVALUE, MINVALUE) TO (MAXVALUE, MAXVALUE, MAXVALUE); +-- \d+ unbounded_range_part DROP TABLE unbounded_range_part; CREATE TABLE range_parted4_1 PARTITION OF range_parted4 FOR VALUES FROM (MINVALUE, MINVALUE, MINVALUE) TO (1, MAXVALUE, MAXVALUE); +-- \d+ range_parted4_1 CREATE TABLE range_parted4_2 PARTITION OF range_parted4 FOR VALUES FROM (3, 4, 5) TO (6, 7, MAXVALUE); +-- \d+ range_parted4_2 CREATE TABLE range_parted4_3 PARTITION OF range_parted4 FOR VALUES FROM (6, 8, MINVALUE) TO (9, MAXVALUE, MAXVALUE); +-- \d+ range_parted4_3 DROP TABLE range_parted4; -- user-defined operator class in partition key @@ -631,6 +657,7 @@ CREATE TABLE parted_col_comment (a int, b text) PARTITION BY LIST (a); COMMENT ON TABLE parted_col_comment IS 'Am partitioned table'; COMMENT ON COLUMN parted_col_comment.a IS 'Partition key'; SELECT obj_description('parted_col_comment'::regclass); +-- \d+ parted_col_comment DROP TABLE parted_col_comment; -- specifying storage parameters for partitioned tables is not supported @@ -639,12 +666,14 @@ CREATE TABLE parted_col_comment (a int, b text) PARTITION BY LIST (a) WITH (fill -- list partitioning on array type column CREATE TABLE arrlp (a int[]) PARTITION BY LIST (a); CREATE TABLE arrlp12 PARTITION OF arrlp FOR VALUES IN ('{1}', '{2}'); +-- \d+ arrlp12 DROP TABLE arrlp; -- partition on boolean column create table boolspart (a bool) partition by list (a); create table boolspart_t partition of boolspart for values in (true); create table boolspart_f partition of boolspart for values in (false); +-- \d+ boolspart drop table boolspart; -- partitions mixing temporary and permanent relations @@ -712,4 +741,6 @@ create index part_column_drop_d_pred on part_column_drop(d) where d = 2; create index part_column_drop_d_expr on part_column_drop((d = 2)); create table part_column_drop_1_10 partition of part_column_drop for values from (1) to (10); +-- \d part_column_drop +-- \d part_column_drop_1_10 drop table part_column_drop; diff --git a/crates/squawk_parser/tests/data/regression_suite/create_table_like.sql b/crates/squawk_parser/tests/data/regression_suite/create_table_like.sql index 8ddb1839..c395305d 100644 --- a/crates/squawk_parser/tests/data/regression_suite/create_table_like.sql +++ b/crates/squawk_parser/tests/data/regression_suite/create_table_like.sql @@ -38,23 +38,29 @@ SELECT * FROM inhg; /* Two records with three 
columns in order x=x, xx=text, y=y DROP TABLE inhg; CREATE TABLE test_like_id_1 (a bigint GENERATED ALWAYS AS IDENTITY, b text); +-- \d test_like_id_1 INSERT INTO test_like_id_1 (b) VALUES ('b1'); SELECT * FROM test_like_id_1; CREATE TABLE test_like_id_2 (LIKE test_like_id_1); +-- \d test_like_id_2 INSERT INTO test_like_id_2 (b) VALUES ('b2'); SELECT * FROM test_like_id_2; -- identity was not copied CREATE TABLE test_like_id_3 (LIKE test_like_id_1 INCLUDING IDENTITY); +-- \d test_like_id_3 INSERT INTO test_like_id_3 (b) VALUES ('b3'); SELECT * FROM test_like_id_3; -- identity was copied and applied DROP TABLE test_like_id_1, test_like_id_2, test_like_id_3; CREATE TABLE test_like_gen_1 (a int, b int GENERATED ALWAYS AS (a * 2) STORED, c int GENERATED ALWAYS AS (a * 3) VIRTUAL); +-- \d test_like_gen_1 INSERT INTO test_like_gen_1 (a) VALUES (1); SELECT * FROM test_like_gen_1; CREATE TABLE test_like_gen_2 (LIKE test_like_gen_1); +-- \d test_like_gen_2 INSERT INTO test_like_gen_2 (a) VALUES (1); SELECT * FROM test_like_gen_2; CREATE TABLE test_like_gen_3 (LIKE test_like_gen_1 INCLUDING GENERATED); +-- \d test_like_gen_3 INSERT INTO test_like_gen_3 (a) VALUES (1); SELECT * FROM test_like_gen_3; DROP TABLE test_like_gen_1, test_like_gen_2, test_like_gen_3; @@ -63,16 +69,21 @@ DROP TABLE test_like_gen_1, test_like_gen_2, test_like_gen_3; CREATE TABLE test_like_4 (b int DEFAULT 42, c int GENERATED ALWAYS AS (a * 2) STORED, a int CHECK (a > 0)); +-- \d test_like_4 CREATE TABLE test_like_4a (LIKE test_like_4); CREATE TABLE test_like_4b (LIKE test_like_4 INCLUDING DEFAULTS); CREATE TABLE test_like_4c (LIKE test_like_4 INCLUDING GENERATED); CREATE TABLE test_like_4d (LIKE test_like_4 INCLUDING DEFAULTS INCLUDING GENERATED); +-- \d test_like_4a INSERT INTO test_like_4a (a) VALUES(11); SELECT a, b, c FROM test_like_4a; +-- \d test_like_4b INSERT INTO test_like_4b (a) VALUES(11); SELECT a, b, c FROM test_like_4b; +-- \d test_like_4c INSERT INTO test_like_4c (a) VALUES(11); SELECT a, b, c FROM test_like_4c; +-- \d test_like_4d INSERT INTO test_like_4d (a) VALUES(11); SELECT a, b, c FROM test_like_4d; @@ -82,12 +93,14 @@ CREATE TABLE test_like_5x (p int CHECK (p > 0), q int GENERATED ALWAYS AS (p * 2) STORED); CREATE TABLE test_like_5c (LIKE test_like_4 INCLUDING ALL) INHERITS (test_like_5, test_like_5x); +-- \d test_like_5c -- Test updating of column numbers in statistics expressions (bug #18468) CREATE TABLE test_like_6 (a int, c text, b text); CREATE STATISTICS ext_stat ON (a || b) FROM test_like_6; ALTER TABLE test_like_6 DROP COLUMN c; CREATE TABLE test_like_6c (LIKE test_like_6 INCLUDING ALL); +-- \d+ test_like_6c DROP TABLE test_like_4, test_like_4a, test_like_4b, test_like_4c, test_like_4d; DROP TABLE test_like_5, test_like_5x, test_like_5c; @@ -111,11 +124,13 @@ DROP TABLE inhz; /* Use primary key imported by LIKE for self-referential FK constraint */ CREATE TABLE inhz (x text REFERENCES inhz, LIKE inhx INCLUDING INDEXES); +-- \d inhz DROP TABLE inhz; -- including storage and comments CREATE TABLE ctlt1 (a text CHECK (length(a) > 2) ENFORCED PRIMARY KEY, b text CHECK (length(b) > 100) NOT ENFORCED); +ALTER TABLE ctlt1 ADD CONSTRAINT cc CHECK (length(b) > 100) NOT VALID; CREATE INDEX ctlt1_b_key ON ctlt1 (b); CREATE INDEX ctlt1_fnidx ON ctlt1 ((a || b)); CREATE STATISTICS ctlt1_a_b_stat ON a,b FROM ctlt1; @@ -129,9 +144,10 @@ COMMENT ON INDEX ctlt1_pkey IS 'index pkey'; COMMENT ON INDEX ctlt1_b_key IS 'index b_key'; ALTER TABLE ctlt1 ALTER COLUMN a SET STORAGE MAIN; -CREATE TABLE ctlt2 (c text); 
+CREATE TABLE ctlt2 (c text NOT NULL); ALTER TABLE ctlt2 ALTER COLUMN c SET STORAGE EXTERNAL; COMMENT ON COLUMN ctlt2.c IS 'C'; +COMMENT ON CONSTRAINT ctlt2_c_not_null ON ctlt2 IS 't2_c_not_null'; CREATE TABLE ctlt3 (a text CHECK (length(a) < 5), c text CHECK (length(c) < 7)); ALTER TABLE ctlt3 ALTER COLUMN c SET STORAGE EXTERNAL; @@ -145,14 +161,21 @@ CREATE TABLE ctlt4 (a text, c text); ALTER TABLE ctlt4 ALTER COLUMN c SET STORAGE EXTERNAL; CREATE TABLE ctlt12_storage (LIKE ctlt1 INCLUDING STORAGE, LIKE ctlt2 INCLUDING STORAGE); +-- \d+ ctlt12_storage CREATE TABLE ctlt12_comments (LIKE ctlt1 INCLUDING COMMENTS, LIKE ctlt2 INCLUDING COMMENTS); +-- \d+ ctlt12_comments +SELECT conname, description FROM pg_description, pg_constraint c WHERE classoid = 'pg_constraint'::regclass AND objoid = c.oid AND c.conrelid = 'ctlt12_comments'::regclass; CREATE TABLE ctlt1_inh (LIKE ctlt1 INCLUDING CONSTRAINTS INCLUDING COMMENTS) INHERITS (ctlt1); +-- \d+ ctlt1_inh SELECT description FROM pg_description, pg_constraint c WHERE classoid = 'pg_constraint'::regclass AND objoid = c.oid AND c.conrelid = 'ctlt1_inh'::regclass; CREATE TABLE ctlt13_inh () INHERITS (ctlt1, ctlt3); +-- \d+ ctlt13_inh CREATE TABLE ctlt13_like (LIKE ctlt3 INCLUDING CONSTRAINTS INCLUDING INDEXES INCLUDING COMMENTS INCLUDING STORAGE) INHERITS (ctlt1); +-- \d+ ctlt13_like SELECT description FROM pg_description, pg_constraint c WHERE classoid = 'pg_constraint'::regclass AND objoid = c.oid AND c.conrelid = 'ctlt13_like'::regclass; CREATE TABLE ctlt_all (LIKE ctlt1 INCLUDING ALL); +-- \d+ ctlt_all SELECT c.relname, objsubid, description FROM pg_description, pg_index i, pg_class c WHERE classoid = 'pg_class'::regclass AND objoid = i.indexrelid AND c.oid = i.indexrelid AND i.indrelid = 'ctlt_all'::regclass ORDER BY c.relname, objsubid; SELECT s.stxname, objsubid, description FROM pg_description, pg_statistic_ext s WHERE classoid = 'pg_statistic_ext'::regclass AND objoid = s.oid AND s.stxrelid = 'ctlt_all'::regclass ORDER BY s.stxname, objsubid; @@ -161,6 +184,7 @@ CREATE TABLE inh_error2 (LIKE ctlt4 INCLUDING STORAGE) INHERITS (ctlt1); -- Check that LIKE isn't confused by a system catalog of the same name CREATE TABLE pg_attrdef (LIKE ctlt1 INCLUDING ALL); +-- \d+ public.pg_attrdef DROP TABLE public.pg_attrdef; -- Check that LIKE isn't confused when new table masks the old, either @@ -168,6 +192,7 @@ BEGIN; CREATE SCHEMA ctl_schema; SET LOCAL search_path = ctl_schema, public; CREATE TABLE ctlt1 (LIKE ctlt1 INCLUDING ALL); +-- \d+ ctlt1 ROLLBACK; DROP TABLE ctlt1, ctlt2, ctlt3, ctlt4, ctlt12_storage, ctlt12_comments, ctlt1_inh, ctlt13_inh, ctlt13_like, ctlt_all, ctla, ctlb CASCADE; @@ -175,7 +200,18 @@ DROP TABLE ctlt1, ctlt2, ctlt3, ctlt4, ctlt12_storage, ctlt12_comments, ctlt1_in -- LIKE must respect NO INHERIT property of constraints CREATE TABLE noinh_con_copy (a int CHECK (a > 0) NO INHERIT, b int not null, c int not null no inherit); -CREATE TABLE noinh_con_copy1 (LIKE noinh_con_copy INCLUDING CONSTRAINTS); + +COMMENT ON CONSTRAINT noinh_con_copy_b_not_null ON noinh_con_copy IS 'not null b'; +COMMENT ON CONSTRAINT noinh_con_copy_c_not_null ON noinh_con_copy IS 'not null c no inherit'; + +CREATE TABLE noinh_con_copy1 (LIKE noinh_con_copy INCLUDING CONSTRAINTS INCLUDING COMMENTS); +-- \d+ noinh_con_copy1 + +SELECT conname, description +FROM pg_description, pg_constraint c +WHERE classoid = 'pg_constraint'::regclass +AND objoid = c.oid AND c.conrelid = 'noinh_con_copy1'::regclass +ORDER BY conname COLLATE "C"; -- fail, as partitioned tables 
don't allow NO INHERIT constraints CREATE TABLE noinh_con_copy1_parted (LIKE noinh_con_copy INCLUDING ALL) @@ -221,9 +257,11 @@ CREATE STATISTICS ctl_table_stat ON a,b FROM ctl_table; ALTER TABLE ctl_table ADD CONSTRAINT foo CHECK (b = 'text'); ALTER TABLE ctl_table ALTER COLUMN b SET STORAGE MAIN; +-- \d+ ctl_table -- Test EXCLUDING ALL --- CREATE FOREIGN TABLE ctl_foreign_table1(LIKE ctl_table EXCLUDING ALL) SERVER ctl_s0; +CREATE FOREIGN TABLE ctl_foreign_table1(LIKE ctl_table EXCLUDING ALL) SERVER ctl_s0; +-- \d+ ctl_foreign_table1 -- \d+ does not report the value of attcompression for a foreign table, so -- check separately. SELECT attname, attcompression FROM pg_attribute @@ -231,7 +269,8 @@ SELECT attname, attcompression FROM pg_attribute -- Test INCLUDING ALL -- INDEXES, IDENTITY, COMPRESSION, STORAGE are not copied. --- CREATE FOREIGN TABLE ctl_foreign_table2(LIKE ctl_table INCLUDING ALL) SERVER ctl_s0; +CREATE FOREIGN TABLE ctl_foreign_table2(LIKE ctl_table INCLUDING ALL) SERVER ctl_s0; +-- \d+ ctl_foreign_table2 -- \d+ does not report the value of attcompression for a foreign table, so -- check separately. SELECT attname, attcompression FROM pg_attribute diff --git a/crates/squawk_parser/tests/data/regression_suite/create_type.sql b/crates/squawk_parser/tests/data/regression_suite/create_type.sql index 230964a2..24019f4c 100644 --- a/crates/squawk_parser/tests/data/regression_suite/create_type.sql +++ b/crates/squawk_parser/tests/data/regression_suite/create_type.sql @@ -3,7 +3,10 @@ -- -- directory path and dlsuffix are passed to us in environment variables +-- \getenv libdir PG_LIBDIR +-- \getenv dlsuffix PG_DLSUFFIX +-- \set regresslib :libdir '/regress' :dlsuffix -- -- Test the "old style" approach of making the I/O functions first, diff --git a/crates/squawk_parser/tests/data/regression_suite/create_view.sql b/crates/squawk_parser/tests/data/regression_suite/create_view.sql index 4efdae91..a1c37836 100644 --- a/crates/squawk_parser/tests/data/regression_suite/create_view.sql +++ b/crates/squawk_parser/tests/data/regression_suite/create_view.sql @@ -5,7 +5,11 @@ -- -- directory paths and dlsuffix are passed to us in environment variables +-- \getenv abs_srcdir PG_ABS_SRCDIR +-- \getenv libdir PG_LIBDIR +-- \getenv dlsuffix PG_DLSUFFIX +-- \set regresslib :libdir '/regress' :dlsuffix CREATE FUNCTION interpt_pp(path, path) RETURNS point @@ -18,6 +22,7 @@ CREATE TABLE real_city ( outline path ); +-- \set filename :abs_srcdir '/data/real_city.data' COPY real_city FROM 'filename'; ANALYZE real_city; @@ -64,6 +69,12 @@ CREATE VIEW key_dependent_view_no_cols AS CREATE TABLE viewtest_tbl (a int, b int, c numeric(10,1), d text COLLATE "C"); +COPY viewtest_tbl FROM stdin; +-- 5 10 1.1 xy +-- 10 15 2.2 xyz +-- 15 20 3.3 xyzz +-- 20 25 4.4 xyzzy +-- \. 
CREATE OR REPLACE VIEW viewtest AS SELECT * FROM viewtest_tbl; @@ -168,8 +179,8 @@ CREATE VIEW v12_temp AS SELECT true FROM v11_temp; -- a view should also be temporary if it references a temporary sequence CREATE SEQUENCE seq1; CREATE TEMPORARY SEQUENCE seq1_temp; -CREATE VIEW v9 AS SELECT seq1.is_called FROM seq1; -CREATE VIEW v13_temp AS SELECT seq1_temp.is_called FROM seq1_temp; +CREATE VIEW v9 AS SELECT nextval('seq1'); +CREATE VIEW v13_temp AS SELECT nextval('seq1_temp'); SELECT relname FROM pg_class WHERE relname LIKE 'v_' @@ -284,6 +295,7 @@ SELECT relname, relkind, reloptions FROM pg_class CREATE VIEW unspecified_types AS SELECT 42 as i, 42.5 as num, 'foo' as u, 'foo'::unknown as u2, null as n; +-- \d+ unspecified_types SELECT * FROM unspecified_types; -- This test checks that proper typmods are assigned in a multi-row VALUES @@ -294,6 +306,7 @@ CREATE VIEW tt1 AS ('abc'::varchar(3), '0123456789', 42, 'abcd'::varchar(4)), ('0123456789', 'abc'::varchar(3), 42.12, 'abc'::varchar(4)) ) vv(a,b,c,d); +-- \d+ tt1 SELECT * FROM tt1; SELECT a::varchar(3) FROM tt1; DROP VIEW tt1; @@ -317,24 +330,48 @@ CREATE VIEW aliased_view_4 AS select * from temp_view_test.tt1 where exists (select 1 from tt1 where temp_view_test.tt1.y1 = tt1.f1); +-- \d+ aliased_view_1 +-- \d+ aliased_view_2 +-- \d+ aliased_view_3 +-- \d+ aliased_view_4 ALTER TABLE tx1 RENAME TO a1; +-- \d+ aliased_view_1 +-- \d+ aliased_view_2 +-- \d+ aliased_view_3 +-- \d+ aliased_view_4 ALTER TABLE tt1 RENAME TO a2; +-- \d+ aliased_view_1 +-- \d+ aliased_view_2 +-- \d+ aliased_view_3 +-- \d+ aliased_view_4 ALTER TABLE a1 RENAME TO tt1; +-- \d+ aliased_view_1 +-- \d+ aliased_view_2 +-- \d+ aliased_view_3 +-- \d+ aliased_view_4 ALTER TABLE a2 RENAME TO tx1; ALTER TABLE tx1 SET SCHEMA temp_view_test; +-- \d+ aliased_view_1 +-- \d+ aliased_view_2 +-- \d+ aliased_view_3 +-- \d+ aliased_view_4 ALTER TABLE temp_view_test.tt1 RENAME TO tmp1; ALTER TABLE temp_view_test.tmp1 SET SCHEMA testviewschm2; ALTER TABLE tmp1 RENAME TO tx1; +-- \d+ aliased_view_1 +-- \d+ aliased_view_2 +-- \d+ aliased_view_3 +-- \d+ aliased_view_4 -- Test correct deparsing of ORDER BY when there is an output name conflict @@ -342,12 +379,15 @@ create view aliased_order_by as select x1 as x2, x2 as x1, x3 from tt1 order by x2; -- this is interpreted per SQL92, so really ordering by x1 +-- \d+ aliased_order_by alter view aliased_order_by rename column x1 to x0; +-- \d+ aliased_order_by alter view aliased_order_by rename column x3 to x1; +-- \d+ aliased_order_by -- Test aliasing of joins @@ -356,6 +396,7 @@ select * from (select * from (tbl1 cross join tbl2) same) ss, (tbl3 cross join tbl4) same; +-- \d+ view_of_joins create table tbl1a (a int, c int); create view view_of_joins_2a as select * from tbl1 join tbl1a using (a); @@ -640,6 +681,7 @@ select * from int8_tbl i where i.* in (values(i.*::int8_tbl)); create table tt15v_log(o tt15v, n tt15v, incr bool); create rule updlog as on update to tt15v do also insert into tt15v_log values(old, new, row(old,old) < row(new,new)); +-- \d+ tt15v -- check unique-ification of overlength names diff --git a/crates/squawk_parser/tests/data/regression_suite/database.sql b/crates/squawk_parser/tests/data/regression_suite/database.sql index 46ad2634..4ef36127 100644 --- a/crates/squawk_parser/tests/data/regression_suite/database.sql +++ b/crates/squawk_parser/tests/data/regression_suite/database.sql @@ -2,7 +2,7 @@ CREATE DATABASE regression_tbd ENCODING utf8 LC_COLLATE "C" LC_CTYPE "C" TEMPLATE template0; ALTER DATABASE regression_tbd 
RENAME TO regression_utf8; ALTER DATABASE regression_utf8 SET TABLESPACE regress_tblspace; -ALTER DATABASE regression_utf8 RESET TABLESPACE; +ALTER DATABASE regression_utf8 SET TABLESPACE pg_default; ALTER DATABASE regression_utf8 CONNECTION_LIMIT 123; -- Test PgDatabaseToastTable. Doing this with GRANT would be slow. @@ -11,7 +11,7 @@ UPDATE pg_database SET datacl = array_fill(makeaclitem(10, 10, 'USAGE', false), ARRAY[5e5::int]) WHERE datname = 'regression_utf8'; -- load catcache entry, if nothing else does -ALTER DATABASE regression_utf8 RESET TABLESPACE; +ALTER DATABASE regression_utf8 RENAME TO regression_rename_rolled_back; ROLLBACK; CREATE ROLE regress_datdba_before; diff --git a/crates/squawk_parser/tests/data/regression_suite/dependency.sql b/crates/squawk_parser/tests/data/regression_suite/dependency.sql index 32980f1b..b1afdfda 100644 --- a/crates/squawk_parser/tests/data/regression_suite/dependency.sql +++ b/crates/squawk_parser/tests/data/regression_suite/dependency.sql @@ -35,8 +35,10 @@ DROP USER regress_dep_user2; -- can't drop the owner of an object -- the error message detail here would include a pg_toast_nnn name that -- is not constant, so suppress it +-- \set VERBOSITY terse ALTER TABLE deptest OWNER TO regress_dep_user3; DROP USER regress_dep_user3; +-- \set VERBOSITY default -- if we drop the object, we can drop the user too DROP TABLE deptest; @@ -62,10 +64,13 @@ SET SESSION AUTHORIZATION regress_dep_user1; CREATE TABLE deptest (a serial primary key, b text); GRANT ALL ON deptest1 TO regress_dep_user2; RESET SESSION AUTHORIZATION; +-- \z deptest1 DROP OWNED BY regress_dep_user1; -- all grants revoked +-- \z deptest1 -- table was dropped +-- \d deptest -- Test REASSIGN OWNED GRANT ALL ON deptest1 TO regress_dep_user1; @@ -95,6 +100,7 @@ FROM pg_type JOIN pg_class c ON typrelid = c.oid WHERE typname = 'deptest_t'; RESET SESSION AUTHORIZATION; REASSIGN OWNED BY regress_dep_user1 TO regress_dep_user2; +-- \dt deptest SELECT typowner = relowner FROM pg_type JOIN pg_class c ON typrelid = c.oid WHERE typname = 'deptest_t'; diff --git a/crates/squawk_parser/tests/data/regression_suite/domain.sql b/crates/squawk_parser/tests/data/regression_suite/domain.sql index a6af5604..1f223706 100644 --- a/crates/squawk_parser/tests/data/regression_suite/domain.sql +++ b/crates/squawk_parser/tests/data/regression_suite/domain.sql @@ -28,7 +28,7 @@ create domain d_fail as anyelement; create domain d_fail as int4 unique; create domain d_fail as int4 PRIMARY key; create domain d_fail as int4 constraint cc generated by default as identity; --- create domain d_fail as int4 constraint cc check (values > 1) no inherit; +create domain d_fail as int4 constraint cc check (values > 1) no inherit; create domain d_fail as int4 constraint cc check (values > 1) deferrable; -- Test domain input. @@ -58,7 +58,13 @@ INSERT INTO basictest values ('88', 'haha', 'short text', '123.12'); -- Bad varc INSERT INTO basictest values ('88', 'haha', 'short', '123.1212'); -- Truncate numeric -- Test copy +COPY basictest (testvarchar) FROM stdin; -- fail +-- notsoshorttext +-- \. +COPY basictest (testvarchar) FROM stdin; +-- short +-- \. select * from basictest; @@ -116,7 +122,14 @@ select * from domarrtest; select testint4arr[1], testchar4arr[2:2] from domarrtest; select array_dims(testint4arr), array_dims(testchar4arr) from domarrtest; +COPY domarrtest FROM stdin; +-- {3,4} {q,w,e} +-- \N \N +-- \. +COPY domarrtest FROM stdin; -- fail +-- {3,4} {qwerty,w,e} +-- \. 
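+-- Editorial note (hedged): domain constraints are enforced for COPY input
+-- exactly as for INSERT; the failing row above is rejected because 'qwerty'
+-- overflows the varchar(4) element type of the array domain, i.e. the same
+-- failure path as (illustrative):
+--   INSERT INTO domarrtest VALUES ('{3,4}', '{qwerty,w,e}');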
select * from domarrtest; @@ -172,6 +185,7 @@ explain (verbose, costs off) update dcomptable set d1.r = (d1).r - 1, d1.i = (d1).i + 1 where (d1).i > 0; create rule silly as on delete to dcomptable do instead update dcomptable set d1.r = (d1).r - 1, d1.i = (d1).i + 1 where (d1).i > 0; +-- \d+ dcomptable create function makedcomp(r float8, i float8) returns dcomptype as 'select row(r, i)' language sql; @@ -246,6 +260,7 @@ explain (verbose, costs off) create rule silly as on delete to dcomptable do instead update dcomptable set d1[1].r = d1[1].r - 1, d1[1].i = d1[1].i + 1 where d1[1].i > 0; +-- \d+ dcomptable drop table dcomptable; drop type comptype cascade; @@ -338,9 +353,20 @@ INSERT INTO nulltest values ('a', 'b', NULL, 'd', 'c'); INSERT INTO nulltest values ('a', 'b', 'c', NULL, 'd'); -- Good -- Test copy +COPY nulltest FROM stdin; --fail +-- a b \N d d +-- \. +COPY nulltest FROM stdin; --fail +-- a b c d \N +-- \. -- Last row is bad +COPY nulltest FROM stdin; +-- a b c \N c +-- a b c \N d +-- a b c \N a +-- \. select * from nulltest; @@ -386,6 +412,9 @@ insert into defaulttest default values; insert into defaulttest default values; -- Test defaults with copy +COPY defaulttest(col5) FROM stdin; +-- 42 +-- \. select * from defaulttest; @@ -454,6 +483,7 @@ alter domain con add constraint t check (VALUE < 1); -- fails alter domain con add constraint t check (VALUE < 34); alter domain con add check (VALUE > 0); +-- \dD con insert into domcontest values (-5); -- fails insert into domcontest values (42); -- fails @@ -486,6 +516,7 @@ select count(*) from pg_constraint where contypid = 'connotnull'::regtype and co alter domain connotnull add constraint constr1bis not null; -- redundant select count(*) from pg_constraint where contypid = 'connotnull'::regtype and contype = 'n'; +-- \dD connotnull update domconnotnulltest set col1 = null; -- fails @@ -571,6 +602,9 @@ insert into domain_test values (1, 2); -- should fail alter table domain_test add column c str_domain; +-- disallow duplicated not-null constraints +create domain int_domain1 as int constraint nn1 not null constraint nn2 not null; + create domain str_domain2 as text check (value <> 'foo') default 'foo'; -- should fail diff --git a/crates/squawk_parser/tests/data/regression_suite/eager_aggregate.sql b/crates/squawk_parser/tests/data/regression_suite/eager_aggregate.sql new file mode 100644 index 00000000..abe6d6ae --- /dev/null +++ b/crates/squawk_parser/tests/data/regression_suite/eager_aggregate.sql @@ -0,0 +1,377 @@ +-- +-- EAGER AGGREGATION +-- Test we can push aggregation down below join +-- + +CREATE TABLE eager_agg_t1 (a int, b int, c double precision); +CREATE TABLE eager_agg_t2 (a int, b int, c double precision); +CREATE TABLE eager_agg_t3 (a int, b int, c double precision); + +INSERT INTO eager_agg_t1 SELECT i, i, i FROM generate_series(1, 1000) i; +INSERT INTO eager_agg_t2 SELECT i, i%10, i FROM generate_series(1, 1000) i; +INSERT INTO eager_agg_t3 SELECT i%10, i%10, i FROM generate_series(1, 1000) i; + +ANALYZE eager_agg_t1; +ANALYZE eager_agg_t2; +ANALYZE eager_agg_t3; + + +-- +-- Test eager aggregation over base rel +-- + +-- Perform scan of a table, aggregate the result, join it to the other table +-- and finalize the aggregation. 
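+-- Editorial note (hedged): the pushed-down plan is expected to look roughly
+-- like
+--   Finalize GroupAggregate
+--     -> Join
+--          -> Seq Scan on eager_agg_t1 t1
+--          -> Partial HashAggregate over eager_agg_t2 t2
+-- i.e. t2 is reduced to one partial-aggregate row per join key before the
+-- join, and the aggregate is finalized above it.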
+EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.a, avg(t2.c) + FROM eager_agg_t1 t1 + JOIN eager_agg_t2 t2 ON t1.b = t2.b +GROUP BY t1.a ORDER BY t1.a; + +SELECT t1.a, avg(t2.c) + FROM eager_agg_t1 t1 + JOIN eager_agg_t2 t2 ON t1.b = t2.b +GROUP BY t1.a ORDER BY t1.a; + +-- Produce results with sorting aggregation +SET enable_hashagg TO off; + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.a, avg(t2.c) + FROM eager_agg_t1 t1 + JOIN eager_agg_t2 t2 ON t1.b = t2.b +GROUP BY t1.a ORDER BY t1.a; + +SELECT t1.a, avg(t2.c) + FROM eager_agg_t1 t1 + JOIN eager_agg_t2 t2 ON t1.b = t2.b +GROUP BY t1.a ORDER BY t1.a; + +RESET enable_hashagg; + + +-- +-- Test eager aggregation over join rel +-- + +-- Perform join of tables, aggregate the result, join it to the other table +-- and finalize the aggregation. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.a, avg(t2.c + t3.c) + FROM eager_agg_t1 t1 + JOIN eager_agg_t2 t2 ON t1.b = t2.b + JOIN eager_agg_t3 t3 ON t2.a = t3.a +GROUP BY t1.a ORDER BY t1.a; + +SELECT t1.a, avg(t2.c + t3.c) + FROM eager_agg_t1 t1 + JOIN eager_agg_t2 t2 ON t1.b = t2.b + JOIN eager_agg_t3 t3 ON t2.a = t3.a +GROUP BY t1.a ORDER BY t1.a; + +-- Produce results with sorting aggregation +SET enable_hashagg TO off; + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.a, avg(t2.c + t3.c) + FROM eager_agg_t1 t1 + JOIN eager_agg_t2 t2 ON t1.b = t2.b + JOIN eager_agg_t3 t3 ON t2.a = t3.a +GROUP BY t1.a ORDER BY t1.a; + +SELECT t1.a, avg(t2.c + t3.c) + FROM eager_agg_t1 t1 + JOIN eager_agg_t2 t2 ON t1.b = t2.b + JOIN eager_agg_t3 t3 ON t2.a = t3.a +GROUP BY t1.a ORDER BY t1.a; + +RESET enable_hashagg; + + +-- +-- Test that eager aggregation works for outer join +-- + +-- Ensure aggregation can be pushed down to the non-nullable side +EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.a, avg(t2.c) + FROM eager_agg_t1 t1 + RIGHT JOIN eager_agg_t2 t2 ON t1.b = t2.b +GROUP BY t1.a ORDER BY t1.a; + +SELECT t1.a, avg(t2.c) + FROM eager_agg_t1 t1 + RIGHT JOIN eager_agg_t2 t2 ON t1.b = t2.b +GROUP BY t1.a ORDER BY t1.a; + +-- Ensure aggregation cannot be pushed down to the nullable side +EXPLAIN (VERBOSE, COSTS OFF) +SELECT t2.b, avg(t2.c) + FROM eager_agg_t1 t1 + LEFT JOIN eager_agg_t2 t2 ON t1.b = t2.b +GROUP BY t2.b ORDER BY t2.b; + +SELECT t2.b, avg(t2.c) + FROM eager_agg_t1 t1 + LEFT JOIN eager_agg_t2 t2 ON t1.b = t2.b +GROUP BY t2.b ORDER BY t2.b; + + +-- +-- Test that eager aggregation works for parallel plans +-- + +SET parallel_setup_cost=0; +SET parallel_tuple_cost=0; +SET min_parallel_table_scan_size=0; +SET max_parallel_workers_per_gather=4; + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.a, avg(t2.c) + FROM eager_agg_t1 t1 + JOIN eager_agg_t2 t2 ON t1.b = t2.b +GROUP BY t1.a ORDER BY t1.a; + +SELECT t1.a, avg(t2.c) + FROM eager_agg_t1 t1 + JOIN eager_agg_t2 t2 ON t1.b = t2.b +GROUP BY t1.a ORDER BY t1.a; + +RESET parallel_setup_cost; +RESET parallel_tuple_cost; +RESET min_parallel_table_scan_size; +RESET max_parallel_workers_per_gather; + +-- +-- Test eager aggregation with GEQO +-- + +SET geqo = on; +SET geqo_threshold = 2; + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.a, avg(t2.c) + FROM eager_agg_t1 t1 + JOIN eager_agg_t2 t2 ON t1.b = t2.b +GROUP BY t1.a ORDER BY t1.a; + +SELECT t1.a, avg(t2.c) + FROM eager_agg_t1 t1 + JOIN eager_agg_t2 t2 ON t1.b = t2.b +GROUP BY t1.a ORDER BY t1.a; + +RESET geqo; +RESET geqo_threshold; + +DROP TABLE eager_agg_t1; +DROP TABLE eager_agg_t2; +DROP TABLE eager_agg_t3; + + +-- +-- Test eager aggregation for partitionwise join +-- + +-- Enable partitionwise aggregate, which by default is disabled. 
+SET enable_partitionwise_aggregate TO true; +-- Enable partitionwise join, which by default is disabled. +SET enable_partitionwise_join TO true; + +CREATE TABLE eager_agg_tab1(x int, y int) PARTITION BY RANGE(x); +CREATE TABLE eager_agg_tab1_p1 PARTITION OF eager_agg_tab1 FOR VALUES FROM (0) TO (5); +CREATE TABLE eager_agg_tab1_p2 PARTITION OF eager_agg_tab1 FOR VALUES FROM (5) TO (10); +CREATE TABLE eager_agg_tab1_p3 PARTITION OF eager_agg_tab1 FOR VALUES FROM (10) TO (15); +CREATE TABLE eager_agg_tab2(x int, y int) PARTITION BY RANGE(y); +CREATE TABLE eager_agg_tab2_p1 PARTITION OF eager_agg_tab2 FOR VALUES FROM (0) TO (5); +CREATE TABLE eager_agg_tab2_p2 PARTITION OF eager_agg_tab2 FOR VALUES FROM (5) TO (10); +CREATE TABLE eager_agg_tab2_p3 PARTITION OF eager_agg_tab2 FOR VALUES FROM (10) TO (15); +INSERT INTO eager_agg_tab1 SELECT i % 15, i % 10 FROM generate_series(1, 1000) i; +INSERT INTO eager_agg_tab2 SELECT i % 10, i % 15 FROM generate_series(1, 1000) i; + +ANALYZE eager_agg_tab1; +ANALYZE eager_agg_tab2; + +-- When GROUP BY clause matches; full aggregation is performed for each +-- partition. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.x, sum(t1.y), count(*) + FROM eager_agg_tab1 t1 + JOIN eager_agg_tab2 t2 ON t1.x = t2.y +GROUP BY t1.x ORDER BY t1.x; + +SELECT t1.x, sum(t1.y), count(*) + FROM eager_agg_tab1 t1 + JOIN eager_agg_tab2 t2 ON t1.x = t2.y +GROUP BY t1.x ORDER BY t1.x; + +-- GROUP BY having other matching key +EXPLAIN (VERBOSE, COSTS OFF) +SELECT t2.y, sum(t1.y), count(*) + FROM eager_agg_tab1 t1 + JOIN eager_agg_tab2 t2 ON t1.x = t2.y +GROUP BY t2.y ORDER BY t2.y; + +SELECT t2.y, sum(t1.y), count(*) + FROM eager_agg_tab1 t1 + JOIN eager_agg_tab2 t2 ON t1.x = t2.y +GROUP BY t2.y ORDER BY t2.y; + +-- When GROUP BY clause does not match; partial aggregation is performed for +-- each partition. 
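+-- Editorial note (hedged): when the GROUP BY key does not match the
+-- partition key, rows of one group can span partitions, so the planner is
+-- expected to emit a Partial Aggregate per partition under an Append with a
+-- single Finalize Aggregate combining them above it.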
+EXPLAIN (VERBOSE, COSTS OFF) +SELECT t2.x, sum(t1.x), count(*) + FROM eager_agg_tab1 t1 + JOIN eager_agg_tab2 t2 ON t1.x = t2.y +GROUP BY t2.x HAVING avg(t1.x) > 5 ORDER BY t2.x; + +SELECT t2.x, sum(t1.x), count(*) + FROM eager_agg_tab1 t1 + JOIN eager_agg_tab2 t2 ON t1.x = t2.y +GROUP BY t2.x HAVING avg(t1.x) > 5 ORDER BY t2.x; + +-- Check with eager aggregation over join rel +-- full aggregation +EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.x, sum(t2.y + t3.y) + FROM eager_agg_tab1 t1 + JOIN eager_agg_tab1 t2 ON t1.x = t2.x + JOIN eager_agg_tab1 t3 ON t2.x = t3.x +GROUP BY t1.x ORDER BY t1.x; + +SELECT t1.x, sum(t2.y + t3.y) + FROM eager_agg_tab1 t1 + JOIN eager_agg_tab1 t2 ON t1.x = t2.x + JOIN eager_agg_tab1 t3 ON t2.x = t3.x +GROUP BY t1.x ORDER BY t1.x; + +-- partial aggregation +SET enable_hashagg TO off; +SET max_parallel_workers_per_gather TO 0; + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT t3.y, sum(t2.y + t3.y) + FROM eager_agg_tab1 t1 + JOIN eager_agg_tab1 t2 ON t1.x = t2.x + JOIN eager_agg_tab1 t3 ON t2.x = t3.x +GROUP BY t3.y ORDER BY t3.y; + +SELECT t3.y, sum(t2.y + t3.y) + FROM eager_agg_tab1 t1 + JOIN eager_agg_tab1 t2 ON t1.x = t2.x + JOIN eager_agg_tab1 t3 ON t2.x = t3.x +GROUP BY t3.y ORDER BY t3.y; + +RESET enable_hashagg; +RESET max_parallel_workers_per_gather; + +-- try that with GEQO too +SET geqo = on; +SET geqo_threshold = 2; + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.x, sum(t1.y), count(*) + FROM eager_agg_tab1 t1 + JOIN eager_agg_tab2 t2 ON t1.x = t2.y +GROUP BY t1.x ORDER BY t1.x; + +SELECT t1.x, sum(t1.y), count(*) + FROM eager_agg_tab1 t1 + JOIN eager_agg_tab2 t2 ON t1.x = t2.y +GROUP BY t1.x ORDER BY t1.x; + +RESET geqo; +RESET geqo_threshold; + +DROP TABLE eager_agg_tab1; +DROP TABLE eager_agg_tab2; + + +-- +-- Test with multi-level partitioning scheme +-- +CREATE TABLE eager_agg_tab_ml(x int, y int) PARTITION BY RANGE(x); +CREATE TABLE eager_agg_tab_ml_p1 PARTITION OF eager_agg_tab_ml FOR VALUES FROM (0) TO (10); +CREATE TABLE eager_agg_tab_ml_p2 PARTITION OF eager_agg_tab_ml FOR VALUES FROM (10) TO (20) PARTITION BY RANGE(x); +CREATE TABLE eager_agg_tab_ml_p2_s1 PARTITION OF eager_agg_tab_ml_p2 FOR VALUES FROM (10) TO (15); +CREATE TABLE eager_agg_tab_ml_p2_s2 PARTITION OF eager_agg_tab_ml_p2 FOR VALUES FROM (15) TO (20); +CREATE TABLE eager_agg_tab_ml_p3 PARTITION OF eager_agg_tab_ml FOR VALUES FROM (20) TO (30) PARTITION BY RANGE(x); +CREATE TABLE eager_agg_tab_ml_p3_s1 PARTITION OF eager_agg_tab_ml_p3 FOR VALUES FROM (20) TO (25); +CREATE TABLE eager_agg_tab_ml_p3_s2 PARTITION OF eager_agg_tab_ml_p3 FOR VALUES FROM (25) TO (30); +INSERT INTO eager_agg_tab_ml SELECT i % 30, i % 30 FROM generate_series(1, 1000) i; + +ANALYZE eager_agg_tab_ml; + +-- When GROUP BY clause matches; full aggregation is performed for each +-- partition. +EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.x, sum(t2.y), count(*) + FROM eager_agg_tab_ml t1 + JOIN eager_agg_tab_ml t2 ON t1.x = t2.x +GROUP BY t1.x ORDER BY t1.x; + +SELECT t1.x, sum(t2.y), count(*) + FROM eager_agg_tab_ml t1 + JOIN eager_agg_tab_ml t2 ON t1.x = t2.x +GROUP BY t1.x ORDER BY t1.x; + +-- When GROUP BY clause does not match; partial aggregation is performed for +-- each partition. 
+EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.y, sum(t2.y), count(*) + FROM eager_agg_tab_ml t1 + JOIN eager_agg_tab_ml t2 ON t1.x = t2.x +GROUP BY t1.y ORDER BY t1.y; + +SELECT t1.y, sum(t2.y), count(*) + FROM eager_agg_tab_ml t1 + JOIN eager_agg_tab_ml t2 ON t1.x = t2.x +GROUP BY t1.y ORDER BY t1.y; + +-- Check with eager aggregation over join rel +-- full aggregation +EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.x, sum(t2.y + t3.y), count(*) + FROM eager_agg_tab_ml t1 + JOIN eager_agg_tab_ml t2 ON t1.x = t2.x + JOIN eager_agg_tab_ml t3 ON t2.x = t3.x +GROUP BY t1.x ORDER BY t1.x; + +SELECT t1.x, sum(t2.y + t3.y), count(*) + FROM eager_agg_tab_ml t1 + JOIN eager_agg_tab_ml t2 ON t1.x = t2.x + JOIN eager_agg_tab_ml t3 ON t2.x = t3.x +GROUP BY t1.x ORDER BY t1.x; + +-- partial aggregation +EXPLAIN (VERBOSE, COSTS OFF) +SELECT t3.y, sum(t2.y + t3.y), count(*) + FROM eager_agg_tab_ml t1 + JOIN eager_agg_tab_ml t2 ON t1.x = t2.x + JOIN eager_agg_tab_ml t3 ON t2.x = t3.x +GROUP BY t3.y ORDER BY t3.y; + +SELECT t3.y, sum(t2.y + t3.y), count(*) + FROM eager_agg_tab_ml t1 + JOIN eager_agg_tab_ml t2 ON t1.x = t2.x + JOIN eager_agg_tab_ml t3 ON t2.x = t3.x +GROUP BY t3.y ORDER BY t3.y; + +-- try that with GEQO too +SET geqo = on; +SET geqo_threshold = 2; + +EXPLAIN (VERBOSE, COSTS OFF) +SELECT t1.x, sum(t2.y), count(*) + FROM eager_agg_tab_ml t1 + JOIN eager_agg_tab_ml t2 ON t1.x = t2.x +GROUP BY t1.x ORDER BY t1.x; + +SELECT t1.x, sum(t2.y), count(*) + FROM eager_agg_tab_ml t1 + JOIN eager_agg_tab_ml t2 ON t1.x = t2.x +GROUP BY t1.x ORDER BY t1.x; + +RESET geqo; +RESET geqo_threshold; + +DROP TABLE eager_agg_tab_ml; diff --git a/crates/squawk_parser/tests/data/regression_suite/enum.sql b/crates/squawk_parser/tests/data/regression_suite/enum.sql index 3bec592c..59559661 100644 --- a/crates/squawk_parser/tests/data/regression_suite/enum.sql +++ b/crates/squawk_parser/tests/data/regression_suite/enum.sql @@ -19,7 +19,12 @@ SELECT 'mauve'::rainbow; SELECT pg_input_is_valid('red', 'rainbow'); SELECT pg_input_is_valid('mauve', 'rainbow'); SELECT * FROM pg_input_error_info('mauve', 'rainbow'); +-- \x SELECT * FROM pg_input_error_info(repeat('too_long', 32), 'rainbow'); +-- \x + +-- check for duplicate enum entries +CREATE TYPE dup_enum AS ENUM ('foo','bar','foo'); -- -- adding new values @@ -128,6 +133,10 @@ ORDER BY enumsortorder; -- CREATE TABLE enumtest (col rainbow); INSERT INTO enumtest values ('red'), ('orange'), ('yellow'), ('green'); +COPY enumtest FROM stdin; +-- blue +-- purple +-- \. SELECT * FROM enumtest; -- diff --git a/crates/squawk_parser/tests/data/regression_suite/errors.sql b/crates/squawk_parser/tests/data/regression_suite/errors.sql index a828bbd0..67194f60 100644 --- a/crates/squawk_parser/tests/data/regression_suite/errors.sql +++ b/crates/squawk_parser/tests/data/regression_suite/errors.sql @@ -26,7 +26,7 @@ select * from nonesuch; select nonesuch from pg_database; -- empty distinct list isn't OK --- select distinct from pg_database; +select distinct from pg_database; -- bad attribute name on lhs of operator select * from pg_database where nonesuch = pg_database.datname; @@ -46,7 +46,7 @@ select null from pg_database group by grouping sets (()) for update; -- DELETE -- missing relation name (this had better not wildcard!) --- delete from; +delete from; -- no such relation delete from nonesuch; @@ -56,7 +56,7 @@ delete from nonesuch; -- DROP -- missing relation name (this had better not wildcard!) 
--- drop table; +drop table; -- no such relation drop table nonesuch; @@ -128,10 +128,10 @@ create aggregate newcnt1 (sfunc = int4inc, -- DROP INDEX -- missing index name --- drop index; +drop index; -- bad index name --- drop index 314159; +drop index 314159; -- no such index drop index nonesuch; @@ -141,13 +141,13 @@ drop index nonesuch; -- DROP AGGREGATE -- missing aggregate name --- drop aggregate; +drop aggregate; -- missing aggregate type --- drop aggregate newcnt1; +drop aggregate newcnt1; -- bad aggregate name --- drop aggregate 314159 (int); +drop aggregate 314159 (int); -- bad aggregate type drop aggregate newcnt (nonesuch); @@ -163,10 +163,10 @@ drop aggregate newcnt (float4); -- DROP FUNCTION -- missing function name --- drop function (); +drop function (); -- bad function name --- drop function 314159(); +drop function 314159(); -- no such function drop function nonesuch(); @@ -176,10 +176,10 @@ drop function nonesuch(); -- DROP TYPE -- missing type name --- drop type; +drop type; -- bad type name --- drop type 314159; +drop type 314159; -- no such type drop type nonesuch; @@ -189,34 +189,34 @@ drop type nonesuch; -- DROP OPERATOR -- missing everything --- drop operator; +drop operator; -- bad operator name --- drop operator equals; +drop operator equals; -- missing type list --- drop operator ===; +drop operator ===; -- missing parentheses --- drop operator int4, int4; +drop operator int4, int4; -- missing operator name --- drop operator (int4, int4); +drop operator (int4, int4); -- missing type list contents --- drop operator === (); +drop operator === (); -- no such operator --- drop operator === (int4); +drop operator === (int4); -- no such operator by that name drop operator === (int4, int4); -- no such type1 --- drop operator = (nonesuch); +drop operator = (nonesuch); -- no such type1 --- drop operator = ( , int4); +drop operator = ( , int4); -- no such type1 drop operator = (nonesuch, int4); @@ -225,25 +225,25 @@ drop operator = (nonesuch, int4); drop operator = (int4, nonesuch); -- no such type2 --- drop operator = (int4, ); +drop operator = (int4, ); -- -- DROP RULE -- missing rule name --- drop rule; +drop rule; -- bad rule name --- drop rule 314159; +drop rule 314159; -- no such rule drop rule nonesuch on noplace; -- these postquel variants are no longer supported --- drop tuple rule nonesuch; --- drop instance rule nonesuch on noplace; --- drop rewrite rule nonesuch; +drop tuple rule nonesuch; +drop instance rule nonesuch on noplace; +drop rewrite rule nonesuch; -- -- Check that division-by-zero is properly caught. 
@@ -272,98 +272,99 @@ select 1::float4/0; select 1/0::float4; --- -- --- -- Test psql's reporting of syntax error location --- -- - --- xxx; - --- CREATE foo; - --- CREATE TABLE ; - --- CREATE TABLE - --- INSERT INTO foo VALUES(123) foo; - --- INSERT INTO 123 --- VALUES(123); - --- INSERT INTO foo --- VALUES(123) 123 --- ; - --- -- with a tab --- CREATE TABLE foo --- (id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, --- id3 INTEGER NOT NUL, --- id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL); - --- -- long line to be truncated on the left --- CREATE TABLE foo(id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, --- id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL); - --- -- long line to be truncated on the right --- CREATE TABLE foo( --- id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL, id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY); - --- -- long line to be truncated both ways --- CREATE TABLE foo(id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL); - --- -- long line to be truncated on the left, many lines --- CREATE --- TEMPORARY --- TABLE --- foo(id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, --- id4 INT4 --- UNIQUE --- NOT --- NULL, --- id5 TEXT --- UNIQUE --- NOT --- NULL) --- ; - --- -- long line to be truncated on the right, many lines --- CREATE --- TEMPORARY --- TABLE --- foo( --- id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL, id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY) --- ; - --- -- long line to be truncated both ways, many lines --- CREATE --- TEMPORARY --- TABLE --- foo --- (id --- INT4 --- UNIQUE NOT NULL, idx INT4 UNIQUE NOT NULL, idy INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL, --- idz INT4 UNIQUE NOT NULL, --- idv INT4 UNIQUE NOT NULL); +-- +-- Test psql's reporting of syntax error location +-- + +xxx; + +CREATE foo; + +CREATE TABLE ; + +CREATE TABLE +-- \g + +INSERT INTO foo VALUES(123) foo; + +INSERT INTO 123 +VALUES(123); + +INSERT INTO foo +VALUES(123) 123 +; + +-- with a tab +CREATE TABLE foo + (id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, + id3 INTEGER NOT NUL, + id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL); + +-- long line to be truncated on the left +CREATE TABLE foo(id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, +id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL); + +-- long line to be truncated on the right +CREATE TABLE foo( +id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL, id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY); + +-- long line to be truncated both ways +CREATE TABLE foo(id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL); + +-- long line to be truncated on the left, many lines +CREATE +TEMPORARY +TABLE +foo(id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, +id4 INT4 +UNIQUE +NOT +NULL, +id5 TEXT +UNIQUE +NOT +NULL) +; + +-- long line to be truncated on the right, many lines +CREATE +TEMPORARY +TABLE +foo( +id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL, id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY) +; + +-- long line to be truncated both ways, many lines +CREATE +TEMPORARY +TABLE +foo +(id +INT4 +UNIQUE NOT NULL, idx INT4 UNIQUE NOT NULL, idy 
INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL, +idz INT4 UNIQUE NOT NULL, +idv INT4 UNIQUE NOT NULL); -- more than 10 lines... --- CREATE --- TEMPORARY --- TABLE --- foo --- (id --- INT4 --- UNIQUE --- NOT --- NULL --- , --- idm --- INT4 --- UNIQUE --- NOT --- NULL, --- idx INT4 UNIQUE NOT NULL, idy INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL, --- idz INT4 UNIQUE NOT NULL, --- idv --- INT4 --- UNIQUE --- NOT --- NULL); +CREATE +TEMPORARY +TABLE +foo +(id +INT4 +UNIQUE +NOT +NULL +, +idm +INT4 +UNIQUE +NOT +NULL, +idx INT4 UNIQUE NOT NULL, idy INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL, +idz INT4 UNIQUE NOT NULL, +idv +INT4 +UNIQUE +NOT +NULL); diff --git a/crates/squawk_parser/tests/data/regression_suite/event_trigger.sql b/crates/squawk_parser/tests/data/regression_suite/event_trigger.sql index 013546b8..c613c0cf 100644 --- a/crates/squawk_parser/tests/data/regression_suite/event_trigger.sql +++ b/crates/squawk_parser/tests/data/regression_suite/event_trigger.sql @@ -202,9 +202,15 @@ INSERT INTO undroppable_objs VALUES ('table', 'audit_tbls.schema_two_table_three'); CREATE TABLE dropped_objects ( - type text, - schema text, - object text + object_type text, + schema_name text, + object_name text, + object_identity text, + address_names text[], + address_args text[], + is_temporary bool, + original bool, + normal bool ); -- This tests errors raised within event triggers; the one in audit_tbls @@ -245,8 +251,12 @@ BEGIN END IF; INSERT INTO dropped_objects - (type, schema, object) VALUES - (obj.object_type, obj.schema_name, obj.object_identity); + (object_type, schema_name, object_name, + object_identity, address_names, address_args, + is_temporary, original, normal) VALUES + (obj.object_type, obj.schema_name, obj.object_name, + obj.object_identity, obj.address_names, obj.address_args, + obj.is_temporary, obj.original, obj.normal); END LOOP; END $$; @@ -263,10 +273,12 @@ DROP SCHEMA schema_one, schema_two CASCADE; DELETE FROM undroppable_objs WHERE object_identity = 'schema_one.table_three'; DROP SCHEMA schema_one, schema_two CASCADE; -SELECT * FROM dropped_objects WHERE schema IS NULL OR schema <> 'pg_toast'; +-- exclude TOAST objects because they have unstable names +SELECT * FROM dropped_objects + WHERE schema_name IS NULL OR schema_name <> 'pg_toast'; DROP OWNED BY regress_evt_user; -SELECT * FROM dropped_objects WHERE type = 'schema'; +SELECT * FROM dropped_objects WHERE object_type = 'schema'; DROP ROLE regress_evt_user; @@ -285,9 +297,10 @@ BEGIN IF NOT r.normal AND NOT r.original THEN CONTINUE; END IF; - RAISE NOTICE 'NORMAL: orig=% normal=% istemp=% type=% identity=% name=% args=%', + RAISE NOTICE 'NORMAL: orig=% normal=% istemp=% type=% identity=% schema=% name=% addr=% args=%', r.original, r.normal, r.is_temporary, r.object_type, - r.object_identity, r.address_names, r.address_args; + r.object_identity, r.schema_name, r.object_name, + r.address_names, r.address_args; END LOOP; END; $$; CREATE EVENT TRIGGER regress_event_trigger_report_dropped ON sql_drop @@ -337,6 +350,46 @@ DROP INDEX evttrig.one_idx; DROP SCHEMA evttrig CASCADE; DROP TABLE a_temp_tbl; +-- check unfiltered results, too +CREATE OR REPLACE FUNCTION event_trigger_report_dropped() + RETURNS event_trigger + LANGUAGE plpgsql +AS $$ +DECLARE r record; +BEGIN + FOR r IN SELECT * from 
pg_event_trigger_dropped_objects() + LOOP + RAISE NOTICE 'DROP: orig=% normal=% istemp=% type=% identity=% schema=% name=% addr=% args=%', + r.original, r.normal, r.is_temporary, r.object_type, + r.object_identity, r.schema_name, r.object_name, + r.address_names, r.address_args; + END LOOP; +END; $$; + +CREATE FUNCTION event_trigger_dummy_trigger() + RETURNS trigger + LANGUAGE plpgsql +AS $$ +BEGIN + RETURN new; +END; $$; + +CREATE TABLE evtrg_nontemp_table (f1 int primary key, f2 int default 42); +CREATE TRIGGER evtrg_nontemp_trig + BEFORE INSERT ON evtrg_nontemp_table + EXECUTE FUNCTION event_trigger_dummy_trigger(); +CREATE POLICY evtrg_nontemp_pol ON evtrg_nontemp_table USING (f2 > 0); +DROP TABLE evtrg_nontemp_table; + +CREATE TEMP TABLE a_temp_tbl (f1 int primary key, f2 int default 42); +CREATE TRIGGER a_temp_trig + BEFORE INSERT ON a_temp_tbl + EXECUTE FUNCTION event_trigger_dummy_trigger(); +CREATE POLICY a_temp_pol ON a_temp_tbl USING (f2 > 0); +DROP TABLE a_temp_tbl; + +DROP FUNCTION event_trigger_dummy_trigger(); + -- CREATE OPERATOR CLASS without FAMILY clause should report -- both CREATE OPERATOR FAMILY and CREATE OPERATOR CLASS CREATE OPERATOR CLASS evttrigopclass FOR TYPE int USING btree AS STORAGE int; diff --git a/crates/squawk_parser/tests/data/regression_suite/event_trigger_login.sql b/crates/squawk_parser/tests/data/regression_suite/event_trigger_login.sql index 1a74a7e7..69eb418a 100644 --- a/crates/squawk_parser/tests/data/regression_suite/event_trigger_login.sql +++ b/crates/squawk_parser/tests/data/regression_suite/event_trigger_login.sql @@ -9,7 +9,9 @@ END; $$ LANGUAGE plpgsql; CREATE EVENT TRIGGER on_login_trigger ON login EXECUTE PROCEDURE on_login_proc(); ALTER EVENT TRIGGER on_login_trigger ENABLE ALWAYS; +-- \c SELECT COUNT(*) FROM user_logins; +-- \c SELECT COUNT(*) FROM user_logins; -- Check dathasloginevt in system catalog @@ -19,3 +21,4 @@ SELECT dathasloginevt FROM pg_database WHERE datname= 'DBNAME'; DROP TABLE user_logins; DROP EVENT TRIGGER on_login_trigger; DROP FUNCTION on_login_proc(); +-- \c diff --git a/crates/squawk_parser/tests/data/regression_suite/explain.sql b/crates/squawk_parser/tests/data/regression_suite/explain.sql index 7842d25d..ebdab426 100644 --- a/crates/squawk_parser/tests/data/regression_suite/explain.sql +++ b/crates/squawk_parser/tests/data/regression_suite/explain.sql @@ -61,6 +61,7 @@ set track_io_timing = off; -- Simple cases +explain (costs off) select 1 as a, 2 as b having false; select explain_filter('explain select * from int8_tbl i8'); select explain_filter('explain (analyze, buffers off) select * from int8_tbl i8'); select explain_filter('explain (analyze, buffers off, verbose) select * from int8_tbl i8'); diff --git a/crates/squawk_parser/tests/data/regression_suite/expressions.sql b/crates/squawk_parser/tests/data/regression_suite/expressions.sql index 723e6092..db96bea6 100644 --- a/crates/squawk_parser/tests/data/regression_suite/expressions.sql +++ b/crates/squawk_parser/tests/data/regression_suite/expressions.sql @@ -56,6 +56,7 @@ create view numeric_view as f2, f2::numeric(16,4) as f2164, f2::numeric as f2n from numeric_tbl; +-- \d+ numeric_view explain (verbose, costs off) select * from numeric_view; @@ -70,6 +71,7 @@ create view bpchar_view as f2, f2::character(14) as f214, f2::bpchar as f2n from bpchar_tbl; +-- \d+ bpchar_view explain (verbose, costs off) select * from bpchar_view where f1::bpchar = 'foo'; diff --git a/crates/squawk_parser/tests/data/regression_suite/foreign_data.sql 
b/crates/squawk_parser/tests/data/regression_suite/foreign_data.sql index 6b5c8593..99e33aed 100644 --- a/crates/squawk_parser/tests/data/regression_suite/foreign_data.sql +++ b/crates/squawk_parser/tests/data/regression_suite/foreign_data.sql @@ -3,7 +3,10 @@ -- -- directory paths and dlsuffix are passed to us in environment variables +-- \getenv libdir PG_LIBDIR +-- \getenv dlsuffix PG_DLSUFFIX +-- \set regresslib :libdir '/regress' :dlsuffix CREATE FUNCTION test_fdw_handler() RETURNS fdw_handler @@ -38,77 +41,90 @@ SELECT srvname, srvoptions FROM pg_foreign_server; SELECT * FROM pg_user_mapping; -- CREATE FOREIGN DATA WRAPPER --- CREATE FOREIGN DATA WRAPPER foo VALIDATOR bar; -- ERROR +CREATE FOREIGN DATA WRAPPER foo VALIDATOR bar; -- ERROR CREATE FOREIGN DATA WRAPPER foo; +-- \dew CREATE FOREIGN DATA WRAPPER foo; -- duplicate DROP FOREIGN DATA WRAPPER foo; CREATE FOREIGN DATA WRAPPER foo OPTIONS (testing '1'); +-- \dew+ DROP FOREIGN DATA WRAPPER foo; --- CREATE FOREIGN DATA WRAPPER foo OPTIONS (testing '1', testing '2'); -- ERROR +CREATE FOREIGN DATA WRAPPER foo OPTIONS (testing '1', testing '2'); -- ERROR CREATE FOREIGN DATA WRAPPER foo OPTIONS (testing '1', another '2'); +-- \dew+ DROP FOREIGN DATA WRAPPER foo; SET ROLE regress_test_role; --- CREATE FOREIGN DATA WRAPPER foo; -- ERROR +CREATE FOREIGN DATA WRAPPER foo; -- ERROR RESET ROLE; CREATE FOREIGN DATA WRAPPER foo VALIDATOR postgresql_fdw_validator; +-- \dew+ -- HANDLER related checks CREATE FUNCTION invalid_fdw_handler() RETURNS int LANGUAGE SQL AS 'SELECT 1;'; --- CREATE FOREIGN DATA WRAPPER test_fdw HANDLER invalid_fdw_handler; -- ERROR --- CREATE FOREIGN DATA WRAPPER test_fdw HANDLER test_fdw_handler HANDLER invalid_fdw_handler; -- ERROR +CREATE FOREIGN DATA WRAPPER test_fdw HANDLER invalid_fdw_handler; -- ERROR +CREATE FOREIGN DATA WRAPPER test_fdw HANDLER test_fdw_handler HANDLER invalid_fdw_handler; -- ERROR CREATE FOREIGN DATA WRAPPER test_fdw HANDLER test_fdw_handler; DROP FOREIGN DATA WRAPPER test_fdw; -- ALTER FOREIGN DATA WRAPPER --- ALTER FOREIGN DATA WRAPPER foo OPTIONS (nonexistent 'fdw'); -- ERROR +ALTER FOREIGN DATA WRAPPER foo OPTIONS (nonexistent 'fdw'); -- ERROR -- ALTER FOREIGN DATA WRAPPER foo; -- ERROR --- ALTER FOREIGN DATA WRAPPER foo VALIDATOR bar; -- ERROR +ALTER FOREIGN DATA WRAPPER foo VALIDATOR bar; -- ERROR ALTER FOREIGN DATA WRAPPER foo NO VALIDATOR; +-- \dew+ ALTER FOREIGN DATA WRAPPER foo OPTIONS (a '1', b '2'); --- ALTER FOREIGN DATA WRAPPER foo OPTIONS (SET c '4'); -- ERROR --- ALTER FOREIGN DATA WRAPPER foo OPTIONS (DROP c); -- ERROR +ALTER FOREIGN DATA WRAPPER foo OPTIONS (SET c '4'); -- ERROR +ALTER FOREIGN DATA WRAPPER foo OPTIONS (DROP c); -- ERROR ALTER FOREIGN DATA WRAPPER foo OPTIONS (ADD x '1', DROP x); +-- \dew+ ALTER FOREIGN DATA WRAPPER foo OPTIONS (DROP a, SET b '3', ADD c '4'); +-- \dew+ ALTER FOREIGN DATA WRAPPER foo OPTIONS (a '2'); --- ALTER FOREIGN DATA WRAPPER foo OPTIONS (b '4'); -- ERROR +ALTER FOREIGN DATA WRAPPER foo OPTIONS (b '4'); -- ERROR +-- \dew+ SET ROLE regress_test_role; --- ALTER FOREIGN DATA WRAPPER foo OPTIONS (ADD d '5'); -- ERROR +ALTER FOREIGN DATA WRAPPER foo OPTIONS (ADD d '5'); -- ERROR SET ROLE regress_test_role_super; ALTER FOREIGN DATA WRAPPER foo OPTIONS (ADD d '5'); +-- \dew+ --- ALTER FOREIGN DATA WRAPPER foo OWNER TO regress_test_role; -- ERROR +ALTER FOREIGN DATA WRAPPER foo OWNER TO regress_test_role; -- ERROR ALTER FOREIGN DATA WRAPPER foo OWNER TO regress_test_role_super; ALTER ROLE regress_test_role_super NOSUPERUSER; SET ROLE 
regress_test_role_super; --- ALTER FOREIGN DATA WRAPPER foo OPTIONS (ADD e '6'); -- ERROR +ALTER FOREIGN DATA WRAPPER foo OPTIONS (ADD e '6'); -- ERROR RESET ROLE; +-- \dew+ ALTER FOREIGN DATA WRAPPER foo RENAME TO foo1; +-- \dew+ ALTER FOREIGN DATA WRAPPER foo1 RENAME TO foo; -- HANDLER related checks --- ALTER FOREIGN DATA WRAPPER foo HANDLER invalid_fdw_handler; -- ERROR --- ALTER FOREIGN DATA WRAPPER foo HANDLER test_fdw_handler HANDLER anything; -- ERROR +ALTER FOREIGN DATA WRAPPER foo HANDLER invalid_fdw_handler; -- ERROR +ALTER FOREIGN DATA WRAPPER foo HANDLER test_fdw_handler HANDLER anything; -- ERROR ALTER FOREIGN DATA WRAPPER foo HANDLER test_fdw_handler; DROP FUNCTION invalid_fdw_handler(); -- DROP FOREIGN DATA WRAPPER --- DROP FOREIGN DATA WRAPPER nonexistent; -- ERROR +DROP FOREIGN DATA WRAPPER nonexistent; -- ERROR DROP FOREIGN DATA WRAPPER IF EXISTS nonexistent; +-- \dew+ --- DROP ROLE regress_test_role_super; -- ERROR +DROP ROLE regress_test_role_super; -- ERROR SET ROLE regress_test_role_super; DROP FOREIGN DATA WRAPPER foo; RESET ROLE; DROP ROLE regress_test_role_super; +-- \dew+ CREATE FOREIGN DATA WRAPPER foo; CREATE SERVER s1 FOREIGN DATA WRAPPER foo; @@ -116,17 +132,23 @@ COMMENT ON SERVER s1 IS 'foreign server'; CREATE USER MAPPING FOR current_user SERVER s1; CREATE USER MAPPING FOR current_user SERVER s1; -- ERROR CREATE USER MAPPING IF NOT EXISTS FOR current_user SERVER s1; -- NOTICE --- DROP FOREIGN DATA WRAPPER foo; -- ERROR +-- \dew+ +-- \des+ +-- \deu+ +DROP FOREIGN DATA WRAPPER foo; -- ERROR SET ROLE regress_test_role; --- DROP FOREIGN DATA WRAPPER foo CASCADE; -- ERROR +DROP FOREIGN DATA WRAPPER foo CASCADE; -- ERROR RESET ROLE; DROP FOREIGN DATA WRAPPER foo CASCADE; +-- \dew+ +-- \des+ +-- \deu+ -- exercise CREATE SERVER --- CREATE SERVER s1 FOREIGN DATA WRAPPER foo; -- ERROR +CREATE SERVER s1 FOREIGN DATA WRAPPER foo; -- ERROR CREATE FOREIGN DATA WRAPPER foo OPTIONS ("test wrapper" 'true'); CREATE SERVER s1 FOREIGN DATA WRAPPER foo; --- CREATE SERVER s1 FOREIGN DATA WRAPPER foo; -- ERROR +CREATE SERVER s1 FOREIGN DATA WRAPPER foo; -- ERROR CREATE SERVER IF NOT EXISTS s1 FOREIGN DATA WRAPPER foo; -- No ERROR, just NOTICE CREATE SERVER s2 FOREIGN DATA WRAPPER foo OPTIONS (host 'a', dbname 'b'); CREATE SERVER s3 TYPE 'oracle' FOREIGN DATA WRAPPER foo; @@ -134,49 +156,53 @@ CREATE SERVER s4 TYPE 'oracle' FOREIGN DATA WRAPPER foo OPTIONS (host 'a', dbnam CREATE SERVER s5 VERSION '15.0' FOREIGN DATA WRAPPER foo; CREATE SERVER s6 VERSION '16.0' FOREIGN DATA WRAPPER foo OPTIONS (host 'a', dbname 'b'); CREATE SERVER s7 TYPE 'oracle' VERSION '17.0' FOREIGN DATA WRAPPER foo OPTIONS (host 'a', dbname 'b'); --- CREATE SERVER s8 FOREIGN DATA WRAPPER postgresql OPTIONS (foo '1'); -- ERROR +CREATE SERVER s8 FOREIGN DATA WRAPPER postgresql OPTIONS (foo '1'); -- ERROR CREATE SERVER s8 FOREIGN DATA WRAPPER postgresql OPTIONS (host 'localhost', dbname 's8db'); +-- \des+ SET ROLE regress_test_role; --- CREATE SERVER t1 FOREIGN DATA WRAPPER foo; -- ERROR: no usage on FDW +CREATE SERVER t1 FOREIGN DATA WRAPPER foo; -- ERROR: no usage on FDW RESET ROLE; GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_role; SET ROLE regress_test_role; CREATE SERVER t1 FOREIGN DATA WRAPPER foo; RESET ROLE; +-- \des+ REVOKE USAGE ON FOREIGN DATA WRAPPER foo FROM regress_test_role; GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_indirect; SET ROLE regress_test_role; --- CREATE SERVER t2 FOREIGN DATA WRAPPER foo; -- ERROR +CREATE SERVER t2 FOREIGN DATA WRAPPER foo; -- ERROR RESET ROLE; 
GRANT regress_test_indirect TO regress_test_role; SET ROLE regress_test_role; CREATE SERVER t2 FOREIGN DATA WRAPPER foo; +-- \des+ RESET ROLE; REVOKE regress_test_indirect FROM regress_test_role; -- ALTER SERVER -- ALTER SERVER s0; -- ERROR --- ALTER SERVER s0 OPTIONS (a '1'); -- ERROR +ALTER SERVER s0 OPTIONS (a '1'); -- ERROR ALTER SERVER s1 VERSION '1.0' OPTIONS (servername 's1'); ALTER SERVER s2 VERSION '1.1'; ALTER SERVER s3 OPTIONS ("tns name" 'orcl', port '1521'); GRANT USAGE ON FOREIGN SERVER s1 TO regress_test_role; GRANT USAGE ON FOREIGN SERVER s6 TO regress_test_role2 WITH GRANT OPTION; +-- \des+ SET ROLE regress_test_role; --- ALTER SERVER s1 VERSION '1.1'; -- ERROR --- ALTER SERVER s1 OWNER TO regress_test_role; -- ERROR +ALTER SERVER s1 VERSION '1.1'; -- ERROR +ALTER SERVER s1 OWNER TO regress_test_role; -- ERROR RESET ROLE; ALTER SERVER s1 OWNER TO regress_test_role; GRANT regress_test_role2 TO regress_test_role; SET ROLE regress_test_role; ALTER SERVER s1 VERSION '1.1'; --- ALTER SERVER s1 OWNER TO regress_test_role2; -- ERROR +ALTER SERVER s1 OWNER TO regress_test_role2; -- ERROR RESET ROLE; --- ALTER SERVER s8 OPTIONS (foo '1'); -- ERROR option validation +ALTER SERVER s8 OPTIONS (foo '1'); -- ERROR option validation ALTER SERVER s8 OPTIONS (connect_timeout '30', SET dbname 'db1', DROP host); SET ROLE regress_test_role; --- ALTER SERVER s1 OWNER TO regress_test_indirect; -- ERROR +ALTER SERVER s1 OWNER TO regress_test_indirect; -- ERROR RESET ROLE; GRANT regress_test_indirect TO regress_test_role; SET ROLE regress_test_role; @@ -186,41 +212,49 @@ GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_indirect; SET ROLE regress_test_role; ALTER SERVER s1 OWNER TO regress_test_indirect; RESET ROLE; --- DROP ROLE regress_test_indirect; -- ERROR +DROP ROLE regress_test_indirect; -- ERROR +-- \des+ ALTER SERVER s8 RENAME to s8new; +-- \des+ ALTER SERVER s8new RENAME to s8; -- DROP SERVER --- DROP SERVER nonexistent; -- ERROR +DROP SERVER nonexistent; -- ERROR DROP SERVER IF EXISTS nonexistent; +-- \des SET ROLE regress_test_role; --- DROP SERVER s2; -- ERROR +DROP SERVER s2; -- ERROR DROP SERVER s1; RESET ROLE; +-- \des ALTER SERVER s2 OWNER TO regress_test_role; SET ROLE regress_test_role; DROP SERVER s2; RESET ROLE; +-- \des CREATE USER MAPPING FOR current_user SERVER s3; --- DROP SERVER s3; -- ERROR +-- \deu +DROP SERVER s3; -- ERROR DROP SERVER s3 CASCADE; +-- \des +-- \deu -- CREATE USER MAPPING --- CREATE USER MAPPING FOR regress_test_missing_role SERVER s1; -- ERROR --- CREATE USER MAPPING FOR current_user SERVER s1; -- ERROR +CREATE USER MAPPING FOR regress_test_missing_role SERVER s1; -- ERROR +CREATE USER MAPPING FOR current_user SERVER s1; -- ERROR CREATE USER MAPPING FOR current_user SERVER s4; --- CREATE USER MAPPING FOR user SERVER s4; -- ERROR duplicate +CREATE USER MAPPING FOR user SERVER s4; -- ERROR duplicate CREATE USER MAPPING FOR public SERVER s4 OPTIONS ("this mapping" 'is public'); --- CREATE USER MAPPING FOR user SERVER s8 OPTIONS (username 'test', password 'secret'); -- ERROR +CREATE USER MAPPING FOR user SERVER s8 OPTIONS (username 'test', password 'secret'); -- ERROR CREATE USER MAPPING FOR user SERVER s8 OPTIONS (user 'test', password 'secret'); ALTER SERVER s5 OWNER TO regress_test_role; ALTER SERVER s6 OWNER TO regress_test_indirect; SET ROLE regress_test_role; CREATE USER MAPPING FOR current_user SERVER s5; CREATE USER MAPPING FOR current_user SERVER s6 OPTIONS (username 'test'); --- CREATE USER MAPPING FOR current_user SERVER s7; -- ERROR --- 
CREATE USER MAPPING FOR public SERVER s8; -- ERROR +CREATE USER MAPPING FOR current_user SERVER s7; -- ERROR +CREATE USER MAPPING FOR public SERVER s8; -- ERROR RESET ROLE; ALTER SERVER t1 OWNER TO regress_test_indirect; @@ -228,55 +262,58 @@ SET ROLE regress_test_role; CREATE USER MAPPING FOR current_user SERVER t1 OPTIONS (username 'bob', password 'boo'); CREATE USER MAPPING FOR public SERVER t1; RESET ROLE; +-- \deu -- ALTER USER MAPPING --- ALTER USER MAPPING FOR regress_test_missing_role SERVER s4 OPTIONS (gotcha 'true'); -- ERROR +ALTER USER MAPPING FOR regress_test_missing_role SERVER s4 OPTIONS (gotcha 'true'); -- ERROR -- ALTER USER MAPPING FOR user SERVER ss4 OPTIONS (gotcha 'true'); -- ERROR --- ALTER USER MAPPING FOR public SERVER s5 OPTIONS (gotcha 'true'); -- ERROR --- ALTER USER MAPPING FOR current_user SERVER s8 OPTIONS (username 'test'); -- ERROR +ALTER USER MAPPING FOR public SERVER s5 OPTIONS (gotcha 'true'); -- ERROR +ALTER USER MAPPING FOR current_user SERVER s8 OPTIONS (username 'test'); -- ERROR ALTER USER MAPPING FOR current_user SERVER s8 OPTIONS (DROP user, SET password 'public'); SET ROLE regress_test_role; ALTER USER MAPPING FOR current_user SERVER s5 OPTIONS (ADD modified '1'); ALTER USER MAPPING FOR public SERVER s4 OPTIONS (ADD modified '1'); -- ERROR ALTER USER MAPPING FOR public SERVER t1 OPTIONS (ADD modified '1'); RESET ROLE; +-- \deu+ -- DROP USER MAPPING --- DROP USER MAPPING FOR regress_test_missing_role SERVER s4; -- ERROR +DROP USER MAPPING FOR regress_test_missing_role SERVER s4; -- ERROR DROP USER MAPPING FOR user SERVER ss4; --- DROP USER MAPPING FOR public SERVER s7; -- ERROR +DROP USER MAPPING FOR public SERVER s7; -- ERROR DROP USER MAPPING IF EXISTS FOR regress_test_missing_role SERVER s4; DROP USER MAPPING IF EXISTS FOR user SERVER ss4; DROP USER MAPPING IF EXISTS FOR public SERVER s7; CREATE USER MAPPING FOR public SERVER s8; SET ROLE regress_test_role; --- DROP USER MAPPING FOR public SERVER s8; -- ERROR +DROP USER MAPPING FOR public SERVER s8; -- ERROR RESET ROLE; DROP SERVER s7; +-- \deu -- CREATE FOREIGN TABLE CREATE SCHEMA foreign_schema; CREATE SERVER s0 FOREIGN DATA WRAPPER dummy; -- CREATE FOREIGN TABLE ft1 (); -- ERROR --- CREATE FOREIGN TABLE ft1 () SERVER no_server; -- ERROR --- CREATE FOREIGN TABLE ft1 ( --- c1 integer OPTIONS ("param 1" 'val1') PRIMARY KEY, --- c2 text OPTIONS (param2 'val2', param3 'val3'), --- c3 date --- ) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); -- ERROR +CREATE FOREIGN TABLE ft1 () SERVER no_server; -- ERROR +CREATE FOREIGN TABLE ft1 ( + c1 integer OPTIONS ("param 1" 'val1') PRIMARY KEY, + c2 text OPTIONS (param2 'val2', param3 'val3'), + c3 date +) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); -- ERROR CREATE TABLE ref_table (id integer PRIMARY KEY); --- CREATE FOREIGN TABLE ft1 ( - -- c1 integer OPTIONS ("param 1" 'val1') REFERENCES ref_table (id), - -- c2 text OPTIONS (param2 'val2', param3 'val3'), - -- c3 date --- ) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); -- ERROR +CREATE FOREIGN TABLE ft1 ( + c1 integer OPTIONS ("param 1" 'val1') REFERENCES ref_table (id), + c2 text OPTIONS (param2 'val2', param3 'val3'), + c3 date +) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); -- ERROR DROP TABLE ref_table; --- CREATE FOREIGN TABLE ft1 ( - -- c1 integer OPTIONS ("param 1" 'val1') NOT NULL, - -- c2 text OPTIONS (param2 'val2', param3 'val3'), - -- c3 date, - -- UNIQUE (c3) --- ) SERVER s0 OPTIONS (delimiter ',', quote '"', 
"be quoted" 'value'); -- ERROR +CREATE FOREIGN TABLE ft1 ( + c1 integer OPTIONS ("param 1" 'val1') NOT NULL, + c2 text OPTIONS (param2 'val2', param3 'val3'), + c3 date, + UNIQUE (c3) +) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); -- ERROR CREATE FOREIGN TABLE ft1 ( c1 integer OPTIONS ("param 1" 'val1') NOT NULL, c2 text OPTIONS (param2 'val2', param3 'val3') CHECK (c2 <> ''), @@ -285,16 +322,18 @@ CREATE FOREIGN TABLE ft1 ( ) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); COMMENT ON FOREIGN TABLE ft1 IS 'ft1'; COMMENT ON COLUMN ft1.c1 IS 'ft1.c1'; --- CREATE INDEX id_ft1_c2 ON ft1 (c2); -- ERROR --- SELECT * FROM ft1; -- ERROR --- EXPLAIN SELECT * FROM ft1; -- ERROR +-- \d+ ft1 +-- \det+ +CREATE INDEX id_ft1_c2 ON ft1 (c2); -- ERROR +SELECT * FROM ft1; -- ERROR +EXPLAIN SELECT * FROM ft1; -- ERROR CREATE TABLE lt1 (a INT) PARTITION BY RANGE (a); CREATE FOREIGN TABLE ft_part1 PARTITION OF lt1 FOR VALUES FROM (0) TO (1000) SERVER s0; CREATE INDEX ON lt1 (a); -- skips partition --- CREATE UNIQUE INDEX ON lt1 (a); -- ERROR --- ALTER TABLE lt1 ADD PRIMARY KEY (a); -- ERROR +CREATE UNIQUE INDEX ON lt1 (a); -- ERROR +ALTER TABLE lt1 ADD PRIMARY KEY (a); -- ERROR DROP TABLE lt1; CREATE TABLE lt1 (a INT) PARTITION BY RANGE (a); @@ -306,11 +345,11 @@ ALTER TABLE lt1 ATTACH PARTITION ft_part2 FOR VALUES FROM (1000) TO (2000); DROP FOREIGN TABLE ft_part1, ft_part2; CREATE UNIQUE INDEX ON lt1 (a); ALTER TABLE lt1 ADD PRIMARY KEY (a); --- CREATE FOREIGN TABLE ft_part1 - -- PARTITION OF lt1 FOR VALUES FROM (0) TO (1000) SERVER s0; -- ERROR +CREATE FOREIGN TABLE ft_part1 + PARTITION OF lt1 FOR VALUES FROM (0) TO (1000) SERVER s0; -- ERROR CREATE FOREIGN TABLE ft_part2 (a INT NOT NULL) SERVER s0; --- ALTER TABLE lt1 ATTACH PARTITION ft_part2 - -- FOR VALUES FROM (1000) TO (2000); -- ERROR +ALTER TABLE lt1 ATTACH PARTITION ft_part2 + FOR VALUES FROM (1000) TO (2000); -- ERROR DROP TABLE lt1; DROP FOREIGN TABLE ft_part2; @@ -353,10 +392,10 @@ ALTER FOREIGN TABLE ft1 ALTER COLUMN c4 SET DEFAULT 0; ALTER FOREIGN TABLE ft1 ALTER COLUMN c5 DROP DEFAULT; ALTER FOREIGN TABLE ft1 ALTER COLUMN c6 SET NOT NULL; ALTER FOREIGN TABLE ft1 ALTER COLUMN c7 DROP NOT NULL; --- ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE char(10) USING '0'; -- ERROR +ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE char(10) USING '0'; -- ERROR ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 TYPE char(10); ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 SET DATA TYPE text; --- ALTER FOREIGN TABLE ft1 ALTER COLUMN xmin OPTIONS (ADD p1 'v1'); -- ERROR +ALTER FOREIGN TABLE ft1 ALTER COLUMN xmin OPTIONS (ADD p1 'v1'); -- ERROR ALTER FOREIGN TABLE ft1 ALTER COLUMN c7 OPTIONS (ADD p1 'v1', ADD p2 'v2'), ALTER COLUMN c8 OPTIONS (ADD p1 'v1', ADD p2 'v2'); ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 OPTIONS (SET p2 'V2', DROP p1); @@ -364,27 +403,29 @@ ALTER FOREIGN TABLE ft1 ALTER COLUMN c1 SET STATISTICS 10000; ALTER FOREIGN TABLE ft1 ALTER COLUMN c1 SET (n_distinct = 100); ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 SET STATISTICS -1; ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 SET STORAGE PLAIN; +-- \d+ ft1 -- can't change the column type if it's used elsewhere CREATE TABLE use_ft1_column_type (x ft1); --- ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 SET DATA TYPE integer; -- ERROR +ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 SET DATA TYPE integer; -- ERROR DROP TABLE use_ft1_column_type; --- ALTER FOREIGN TABLE ft1 ADD PRIMARY KEY (c7); -- ERROR +ALTER FOREIGN TABLE ft1 ADD PRIMARY KEY (c7); -- ERROR ALTER FOREIGN TABLE ft1 ADD CONSTRAINT 
ft1_c9_check CHECK (c9 < 0) NOT VALID; --- ALTER FOREIGN TABLE ft1 ALTER CONSTRAINT ft1_c9_check DEFERRABLE; -- ERROR +ALTER FOREIGN TABLE ft1 ALTER CONSTRAINT ft1_c9_check DEFERRABLE; -- ERROR ALTER FOREIGN TABLE ft1 DROP CONSTRAINT ft1_c9_check; --- ALTER FOREIGN TABLE ft1 DROP CONSTRAINT no_const; -- ERROR +ALTER FOREIGN TABLE ft1 DROP CONSTRAINT no_const; -- ERROR ALTER FOREIGN TABLE ft1 DROP CONSTRAINT IF EXISTS no_const; ALTER FOREIGN TABLE ft1 OWNER TO regress_test_role; ALTER FOREIGN TABLE ft1 OPTIONS (DROP delimiter, SET quote '~', ADD escape '@'); --- ALTER FOREIGN TABLE ft1 DROP COLUMN no_column; -- ERROR +ALTER FOREIGN TABLE ft1 DROP COLUMN no_column; -- ERROR ALTER FOREIGN TABLE ft1 DROP COLUMN IF EXISTS no_column; ALTER FOREIGN TABLE ft1 DROP COLUMN c9; ALTER FOREIGN TABLE ft1 ADD COLUMN c11 serial; ALTER FOREIGN TABLE ft1 SET SCHEMA foreign_schema; --- ALTER FOREIGN TABLE ft1 SET TABLESPACE ts; -- ERROR --- ALTER SEQUENCE foreign_schema.ft1_c11_seq SET SCHEMA public; -- ERROR +ALTER FOREIGN TABLE ft1 SET TABLESPACE ts; -- ERROR +ALTER SEQUENCE foreign_schema.ft1_c11_seq SET SCHEMA public; -- ERROR ALTER FOREIGN TABLE foreign_schema.ft1 RENAME c1 TO foreign_column_1; ALTER FOREIGN TABLE foreign_schema.ft1 RENAME TO foreign_table_1; +-- \d foreign_schema.foreign_table_1 -- alter noexisting table ALTER FOREIGN TABLE IF EXISTS doesnt_exist_ft1 ADD COLUMN c4 integer; @@ -473,45 +514,45 @@ ALTER FOREIGN DATA WRAPPER foo VALIDATOR postgresql_fdw_validator; -- Privileges SET ROLE regress_unprivileged_role; --- CREATE FOREIGN DATA WRAPPER foobar; -- ERROR --- ALTER FOREIGN DATA WRAPPER foo OPTIONS (gotcha 'true'); -- ERROR --- ALTER FOREIGN DATA WRAPPER foo OWNER TO regress_unprivileged_role; -- ERROR --- DROP FOREIGN DATA WRAPPER foo; -- ERROR --- GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_role; -- ERROR --- CREATE SERVER s9 FOREIGN DATA WRAPPER foo; -- ERROR --- ALTER SERVER s4 VERSION '0.5'; -- ERROR --- ALTER SERVER s4 OWNER TO regress_unprivileged_role; -- ERROR --- DROP SERVER s4; -- ERROR --- GRANT USAGE ON FOREIGN SERVER s4 TO regress_test_role; -- ERROR --- CREATE USER MAPPING FOR public SERVER s4; -- ERROR --- ALTER USER MAPPING FOR regress_test_role SERVER s6 OPTIONS (gotcha 'true'); -- ERROR --- DROP USER MAPPING FOR regress_test_role SERVER s6; -- ERROR +CREATE FOREIGN DATA WRAPPER foobar; -- ERROR +ALTER FOREIGN DATA WRAPPER foo OPTIONS (gotcha 'true'); -- ERROR +ALTER FOREIGN DATA WRAPPER foo OWNER TO regress_unprivileged_role; -- ERROR +DROP FOREIGN DATA WRAPPER foo; -- ERROR +GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_role; -- ERROR +CREATE SERVER s9 FOREIGN DATA WRAPPER foo; -- ERROR +ALTER SERVER s4 VERSION '0.5'; -- ERROR +ALTER SERVER s4 OWNER TO regress_unprivileged_role; -- ERROR +DROP SERVER s4; -- ERROR +GRANT USAGE ON FOREIGN SERVER s4 TO regress_test_role; -- ERROR +CREATE USER MAPPING FOR public SERVER s4; -- ERROR +ALTER USER MAPPING FOR regress_test_role SERVER s6 OPTIONS (gotcha 'true'); -- ERROR +DROP USER MAPPING FOR regress_test_role SERVER s6; -- ERROR RESET ROLE; GRANT USAGE ON FOREIGN DATA WRAPPER postgresql TO regress_unprivileged_role; GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_unprivileged_role WITH GRANT OPTION; SET ROLE regress_unprivileged_role; --- CREATE FOREIGN DATA WRAPPER foobar; -- ERROR --- ALTER FOREIGN DATA WRAPPER foo OPTIONS (gotcha 'true'); -- ERROR --- DROP FOREIGN DATA WRAPPER foo; -- ERROR +CREATE FOREIGN DATA WRAPPER foobar; -- ERROR +ALTER FOREIGN DATA WRAPPER foo OPTIONS (gotcha 'true'); -- 
ERROR +DROP FOREIGN DATA WRAPPER foo; -- ERROR GRANT USAGE ON FOREIGN DATA WRAPPER postgresql TO regress_test_role; -- WARNING GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_role; CREATE SERVER s9 FOREIGN DATA WRAPPER postgresql; --- ALTER SERVER s6 VERSION '0.5'; -- ERROR --- DROP SERVER s6; -- ERROR --- GRANT USAGE ON FOREIGN SERVER s6 TO regress_test_role; -- ERROR +ALTER SERVER s6 VERSION '0.5'; -- ERROR +DROP SERVER s6; -- ERROR +GRANT USAGE ON FOREIGN SERVER s6 TO regress_test_role; -- ERROR GRANT USAGE ON FOREIGN SERVER s9 TO regress_test_role; --- CREATE USER MAPPING FOR public SERVER s6; -- ERROR +CREATE USER MAPPING FOR public SERVER s6; -- ERROR CREATE USER MAPPING FOR public SERVER s9; --- ALTER USER MAPPING FOR regress_test_role SERVER s6 OPTIONS (gotcha 'true'); -- ERROR +ALTER USER MAPPING FOR regress_test_role SERVER s6 OPTIONS (gotcha 'true'); -- ERROR DROP USER MAPPING FOR regress_test_role SERVER s6; -- ERROR RESET ROLE; --- REVOKE USAGE ON FOREIGN DATA WRAPPER foo FROM regress_unprivileged_role; -- ERROR +REVOKE USAGE ON FOREIGN DATA WRAPPER foo FROM regress_unprivileged_role; -- ERROR REVOKE USAGE ON FOREIGN DATA WRAPPER foo FROM regress_unprivileged_role CASCADE; SET ROLE regress_unprivileged_role; --- GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_role; -- ERROR --- CREATE SERVER s10 FOREIGN DATA WRAPPER foo; -- ERROR +GRANT USAGE ON FOREIGN DATA WRAPPER foo TO regress_test_role; -- ERROR +CREATE SERVER s10 FOREIGN DATA WRAPPER foo; -- ERROR ALTER SERVER s9 VERSION '1.1'; GRANT USAGE ON FOREIGN SERVER s9 TO regress_test_role; CREATE USER MAPPING FOR current_user SERVER s9; @@ -520,10 +561,10 @@ RESET ROLE; CREATE SERVER s9 FOREIGN DATA WRAPPER foo; GRANT USAGE ON FOREIGN SERVER s9 TO regress_unprivileged_role; SET ROLE regress_unprivileged_role; --- ALTER SERVER s9 VERSION '1.2'; -- ERROR +ALTER SERVER s9 VERSION '1.2'; -- ERROR GRANT USAGE ON FOREIGN SERVER s9 TO regress_test_role; -- WARNING CREATE USER MAPPING FOR current_user SERVER s9; --- DROP SERVER s9 CASCADE; -- ERROR +DROP SERVER s9 CASCADE; -- ERROR -- Check visibility of user mapping data SET ROLE regress_test_role; @@ -531,10 +572,13 @@ CREATE SERVER s10 FOREIGN DATA WRAPPER foo; CREATE USER MAPPING FOR public SERVER s10 OPTIONS (user 'secret'); CREATE USER MAPPING FOR regress_unprivileged_role SERVER s10 OPTIONS (user 'secret'); -- owner of server can see some option fields +-- \deu+ RESET ROLE; -- superuser can see all option fields +-- \deu+ -- unprivileged user cannot see any option field SET ROLE regress_unprivileged_role; +-- \deu+ RESET ROLE; DROP SERVER s10 CASCADE; @@ -555,11 +599,11 @@ ON foreign_schema.foreign_table_1 FOR EACH STATEMENT EXECUTE PROCEDURE dummy_trigger(); --- CREATE TRIGGER trigtest_after_stmt_tt AFTER INSERT OR UPDATE OR DELETE -- ERROR --- ON foreign_schema.foreign_table_1 --- REFERENCING NEW TABLE AS new_table --- FOR EACH STATEMENT --- EXECUTE PROCEDURE dummy_trigger(); +CREATE TRIGGER trigtest_after_stmt_tt AFTER INSERT OR UPDATE OR DELETE -- ERROR +ON foreign_schema.foreign_table_1 +REFERENCING NEW TABLE AS new_table +FOR EACH STATEMENT +EXECUTE PROCEDURE dummy_trigger(); CREATE TRIGGER trigtest_before_row BEFORE INSERT OR UPDATE OR DELETE ON foreign_schema.foreign_table_1 @@ -596,13 +640,19 @@ CREATE TABLE fd_pt1 ( ); CREATE FOREIGN TABLE ft2 () INHERITS (fd_pt1) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); +-- \d+ fd_pt1 +-- \d+ ft2 DROP FOREIGN TABLE ft2; +-- \d+ fd_pt1 CREATE FOREIGN TABLE ft2 ( c1 integer NOT NULL, c2 text, c3 
date ) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); +-- \d+ ft2 ALTER FOREIGN TABLE ft2 INHERIT fd_pt1; +-- \d+ fd_pt1 +-- \d+ ft2 CREATE TABLE ct3() INHERITS(ft2); CREATE FOREIGN TABLE ft3 ( c1 integer NOT NULL, @@ -610,6 +660,9 @@ CREATE FOREIGN TABLE ft3 ( c3 date ) INHERITS(ft2) SERVER s0; +-- \d+ ft2 +-- \d+ ct3 +-- \d+ ft3 -- add attributes recursively ALTER TABLE fd_pt1 ADD COLUMN c4 integer; @@ -617,19 +670,25 @@ ALTER TABLE fd_pt1 ADD COLUMN c5 integer DEFAULT 0; ALTER TABLE fd_pt1 ADD COLUMN c6 integer; ALTER TABLE fd_pt1 ADD COLUMN c7 integer NOT NULL; ALTER TABLE fd_pt1 ADD COLUMN c8 integer; +-- \d+ fd_pt1 +-- \d+ ft2 +-- \d+ ct3 +-- \d+ ft3 -- alter attributes recursively ALTER TABLE fd_pt1 ALTER COLUMN c4 SET DEFAULT 0; ALTER TABLE fd_pt1 ALTER COLUMN c5 DROP DEFAULT; ALTER TABLE fd_pt1 ALTER COLUMN c6 SET NOT NULL; ALTER TABLE fd_pt1 ALTER COLUMN c7 DROP NOT NULL; --- ALTER TABLE fd_pt1 ALTER COLUMN c8 TYPE char(10) USING '0'; -- ERROR +ALTER TABLE fd_pt1 ALTER COLUMN c8 TYPE char(10) USING '0'; -- ERROR ALTER TABLE fd_pt1 ALTER COLUMN c8 TYPE char(10); ALTER TABLE fd_pt1 ALTER COLUMN c8 SET DATA TYPE text; ALTER TABLE fd_pt1 ALTER COLUMN c1 SET STATISTICS 10000; ALTER TABLE fd_pt1 ALTER COLUMN c1 SET (n_distinct = 100); ALTER TABLE fd_pt1 ALTER COLUMN c8 SET STATISTICS -1; ALTER TABLE fd_pt1 ALTER COLUMN c8 SET STORAGE EXTERNAL; +-- \d+ fd_pt1 +-- \d+ ft2 -- drop attributes recursively ALTER TABLE fd_pt1 DROP COLUMN c4; @@ -637,6 +696,8 @@ ALTER TABLE fd_pt1 DROP COLUMN c5; ALTER TABLE fd_pt1 DROP COLUMN c6; ALTER TABLE fd_pt1 DROP COLUMN c7; ALTER TABLE fd_pt1 DROP COLUMN c8; +-- \d+ fd_pt1 +-- \d+ ft2 -- add constraints recursively ALTER TABLE fd_pt1 ADD CONSTRAINT fd_pt1chk1 CHECK (c1 > 0) NO INHERIT; @@ -647,7 +708,9 @@ SELECT relname, conname, contype, conislocal, coninhcount, connoinherit WHERE pc.relname = 'fd_pt1' ORDER BY 1,2; -- child does not inherit NO INHERIT constraints --- DROP FOREIGN TABLE ft2; -- ERROR +-- \d+ fd_pt1 +-- \d+ ft2 +DROP FOREIGN TABLE ft2; -- ERROR DROP FOREIGN TABLE ft2 CASCADE; CREATE FOREIGN TABLE ft2 ( c1 integer NOT NULL, @@ -655,10 +718,12 @@ CREATE FOREIGN TABLE ft2 ( c3 date ) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); -- child must have parent's INHERIT constraints --- ALTER FOREIGN TABLE ft2 INHERIT fd_pt1; -- ERROR +ALTER FOREIGN TABLE ft2 INHERIT fd_pt1; -- ERROR ALTER FOREIGN TABLE ft2 ADD CONSTRAINT fd_pt1chk2 CHECK (c2 <> ''); ALTER FOREIGN TABLE ft2 INHERIT fd_pt1; -- child does not inherit NO INHERIT constraints +-- \d+ fd_pt1 +-- \d+ ft2 -- drop constraints recursively ALTER TABLE fd_pt1 DROP CONSTRAINT fd_pt1chk1 CASCADE; @@ -667,8 +732,12 @@ ALTER TABLE fd_pt1 DROP CONSTRAINT fd_pt1chk2 CASCADE; -- NOT VALID case INSERT INTO fd_pt1 VALUES (1, 'fd_pt1'::text, '1994-01-01'::date); ALTER TABLE fd_pt1 ADD CONSTRAINT fd_pt1chk3 CHECK (c2 <> '') NOT VALID; +-- \d+ fd_pt1 +-- \d+ ft2 -- VALIDATE CONSTRAINT need do nothing on foreign tables ALTER TABLE fd_pt1 VALIDATE CONSTRAINT fd_pt1chk3; +-- \d+ fd_pt1 +-- \d+ ft2 -- changes name of an attribute recursively ALTER TABLE fd_pt1 RENAME COLUMN c1 TO f1; @@ -676,18 +745,20 @@ ALTER TABLE fd_pt1 RENAME COLUMN c2 TO f2; ALTER TABLE fd_pt1 RENAME COLUMN c3 TO f3; -- changes name of a constraint recursively ALTER TABLE fd_pt1 RENAME CONSTRAINT fd_pt1chk3 TO f2_check; +-- \d+ fd_pt1 +-- \d+ ft2 DROP TABLE fd_pt1 CASCADE; -- IMPORT FOREIGN SCHEMA --- IMPORT FOREIGN SCHEMA s1 FROM SERVER s9 INTO public; -- ERROR --- IMPORT FOREIGN SCHEMA s1 
LIMIT TO (t1) FROM SERVER s9 INTO public; --ERROR --- IMPORT FOREIGN SCHEMA s1 EXCEPT (t1) FROM SERVER s9 INTO public; -- ERROR --- IMPORT FOREIGN SCHEMA s1 EXCEPT (t1, t2) FROM SERVER s9 INTO public --- OPTIONS (option1 'value1', option2 'value2'); -- ERROR +IMPORT FOREIGN SCHEMA s1 FROM SERVER s9 INTO public; -- ERROR +IMPORT FOREIGN SCHEMA s1 LIMIT TO (t1) FROM SERVER s9 INTO public; --ERROR +IMPORT FOREIGN SCHEMA s1 EXCEPT (t1) FROM SERVER s9 INTO public; -- ERROR +IMPORT FOREIGN SCHEMA s1 EXCEPT (t1, t2) FROM SERVER s9 INTO public +OPTIONS (option1 'value1', option2 'value2'); -- ERROR -- DROP FOREIGN TABLE --- DROP FOREIGN TABLE no_table; -- ERROR +DROP FOREIGN TABLE no_table; -- ERROR DROP FOREIGN TABLE IF EXISTS no_table; DROP FOREIGN TABLE foreign_schema.foreign_table_1; @@ -704,6 +775,8 @@ CREATE TABLE fd_pt2 ( ) PARTITION BY LIST (c1); CREATE FOREIGN TABLE fd_pt2_1 PARTITION OF fd_pt2 FOR VALUES IN (1) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); +-- \d+ fd_pt2 +-- \d+ fd_pt2_1 -- partition cannot have additional columns DROP FOREIGN TABLE fd_pt2_1; @@ -713,16 +786,21 @@ CREATE FOREIGN TABLE fd_pt2_1 ( c3 date, c4 char ) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); --- ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1); -- ERROR +-- \d+ fd_pt2_1 +ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1); -- ERROR DROP FOREIGN TABLE fd_pt2_1; +-- \d+ fd_pt2 CREATE FOREIGN TABLE fd_pt2_1 ( c1 integer NOT NULL, c2 text, c3 date ) SERVER s0 OPTIONS (delimiter ',', quote '"', "be quoted" 'value'); +-- \d+ fd_pt2_1 -- no attach partition validation occurs for foreign tables ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1); +-- \d+ fd_pt2 +-- \d+ fd_pt2_1 -- cannot add column to a partition ALTER TABLE fd_pt2_1 ADD c4 char; @@ -730,6 +808,8 @@ ALTER TABLE fd_pt2_1 ADD c4 char; -- ok to have a partition's own constraints though ALTER TABLE fd_pt2_1 ALTER c3 SET NOT NULL; ALTER TABLE fd_pt2_1 ADD CONSTRAINT p21chk CHECK (c2 <> ''); +-- \d+ fd_pt2 +-- \d+ fd_pt2_1 -- cannot drop inherited NOT NULL constraint from a partition ALTER TABLE fd_pt2_1 ALTER c1 DROP NOT NULL; @@ -737,13 +817,17 @@ ALTER TABLE fd_pt2_1 ALTER c1 DROP NOT NULL; -- partition must have parent's constraints ALTER TABLE fd_pt2 DETACH PARTITION fd_pt2_1; ALTER TABLE fd_pt2 ALTER c2 SET NOT NULL; --- ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1); -- ERROR +-- \d+ fd_pt2 +-- \d+ fd_pt2_1 +ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1); -- ERROR ALTER FOREIGN TABLE fd_pt2_1 ALTER c2 SET NOT NULL; ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1); ALTER TABLE fd_pt2 DETACH PARTITION fd_pt2_1; ALTER TABLE fd_pt2 ADD CONSTRAINT fd_pt2chk1 CHECK (c1 > 0); --- ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1); -- ERROR +-- \d+ fd_pt2 +-- \d+ fd_pt2_1 +ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1); -- ERROR ALTER FOREIGN TABLE fd_pt2_1 ADD CONSTRAINT fd_pt2chk1 CHECK (c1 > 0); ALTER TABLE fd_pt2 ATTACH PARTITION fd_pt2_1 FOR VALUES IN (1); @@ -753,28 +837,29 @@ DROP TABLE fd_pt2; -- foreign table cannot be part of partition tree made of temporary -- relations. 
CREATE TEMP TABLE temp_parted (a int) PARTITION BY LIST (a); --- CREATE FOREIGN TABLE foreign_part PARTITION OF temp_parted DEFAULT --- SERVER s0; -- ERROR +CREATE FOREIGN TABLE foreign_part PARTITION OF temp_parted DEFAULT + SERVER s0; -- ERROR CREATE FOREIGN TABLE foreign_part (a int) SERVER s0; --- ALTER TABLE temp_parted ATTACH PARTITION foreign_part DEFAULT; -- ERROR +ALTER TABLE temp_parted ATTACH PARTITION foreign_part DEFAULT; -- ERROR DROP FOREIGN TABLE foreign_part; DROP TABLE temp_parted; -- Cleanup DROP SCHEMA foreign_schema CASCADE; --- DROP ROLE regress_test_role; -- ERROR +DROP ROLE regress_test_role; -- ERROR DROP SERVER t1 CASCADE; DROP USER MAPPING FOR regress_test_role SERVER s6; DROP FOREIGN DATA WRAPPER foo CASCADE; DROP SERVER s8 CASCADE; DROP ROLE regress_test_indirect; DROP ROLE regress_test_role; --- DROP ROLE regress_unprivileged_role; -- ERROR +DROP ROLE regress_unprivileged_role; -- ERROR REVOKE ALL ON FOREIGN DATA WRAPPER postgresql FROM regress_unprivileged_role; DROP ROLE regress_unprivileged_role; DROP ROLE regress_test_role2; DROP FOREIGN DATA WRAPPER postgresql CASCADE; DROP FOREIGN DATA WRAPPER dummy CASCADE; +-- \c DROP ROLE regress_foreign_data_user; -- At this point we should have no wrappers, no servers, and no mappings. diff --git a/crates/squawk_parser/tests/data/regression_suite/foreign_key.sql b/crates/squawk_parser/tests/data/regression_suite/foreign_key.sql index 5974f038..535c7de0 100644 --- a/crates/squawk_parser/tests/data/regression_suite/foreign_key.sql +++ b/crates/squawk_parser/tests/data/regression_suite/foreign_key.sql @@ -1294,8 +1294,9 @@ UPDATE fk_notpartitioned_pk SET b = 502 WHERE a = 500; UPDATE fk_notpartitioned_pk SET b = 1502 WHERE a = 1500; UPDATE fk_notpartitioned_pk SET b = 2504 WHERE a = 2500; -- check psql behavior +-- \d fk_notpartitioned_pk --- Check the exsting FK trigger +-- Check the existing FK trigger SELECT conname, tgrelid::regclass as tgrel, regexp_replace(tgname, '[0-9]+', 'N') as tgname, tgtype FROM pg_trigger t JOIN pg_constraint c ON (t.tgconstraint = c.oid) WHERE tgrelid IN (SELECT relid FROM pg_partition_tree('fk_partitioned_fk'::regclass) @@ -1388,22 +1389,44 @@ WHERE conrelid::regclass::text like 'fk_partitioned_fk%' ORDER BY oid::regclass: DROP TABLE fk_partitioned_fk, fk_notpartitioned_pk; --- NOT VALID foreign key on a non-partitioned table referencing a partitioned table +-- NOT VALID and NOT ENFORCED foreign key on a non-partitioned table +-- referencing a partitioned table CREATE TABLE fk_partitioned_pk (a int, b int, PRIMARY KEY (a, b)) PARTITION BY RANGE (a, b); CREATE TABLE fk_partitioned_pk_1 PARTITION OF fk_partitioned_pk FOR VALUES FROM (0,0) TO (1000,1000); +CREATE TABLE fk_partitioned_pk_2 PARTITION OF fk_partitioned_pk FOR VALUES FROM (1000,1000) TO (2000,2000); CREATE TABLE fk_notpartitioned_fk (b int, a int); -ALTER TABLE fk_notpartitioned_fk ADD FOREIGN KEY (a, b) REFERENCES fk_partitioned_pk NOT VALID; - --- Constraint will be invalid. -SELECT conname, convalidated FROM pg_constraint +INSERT INTO fk_partitioned_pk VALUES(100,100), (1000,1000); +INSERT INTO fk_notpartitioned_fk VALUES(100,100), (1000,1000); +ALTER TABLE fk_notpartitioned_fk ADD CONSTRAINT fk_notpartitioned_fk_a_b_fkey + FOREIGN KEY (a, b) REFERENCES fk_partitioned_pk NOT VALID; +ALTER TABLE fk_notpartitioned_fk ADD CONSTRAINT fk_notpartitioned_fk_a_b_fkey2 + FOREIGN KEY (a, b) REFERENCES fk_partitioned_pk NOT ENFORCED; + +-- All constraints will be invalid, and _fkey2 constraints will not be enforced. 
+SELECT conname, conenforced, convalidated FROM pg_constraint WHERE conrelid = 'fk_notpartitioned_fk'::regclass ORDER BY oid::regclass::text; ALTER TABLE fk_notpartitioned_fk VALIDATE CONSTRAINT fk_notpartitioned_fk_a_b_fkey; +ALTER TABLE fk_notpartitioned_fk ALTER CONSTRAINT fk_notpartitioned_fk_a_b_fkey2 ENFORCED; --- All constraints are now valid. -SELECT conname, convalidated FROM pg_constraint +-- All constraints are now valid and enforced. +SELECT conname, conenforced, convalidated FROM pg_constraint WHERE conrelid = 'fk_notpartitioned_fk'::regclass ORDER BY oid::regclass::text; +-- test a self-referential FK +ALTER TABLE fk_partitioned_pk ADD CONSTRAINT selffk FOREIGN KEY (a, b) REFERENCES fk_partitioned_pk NOT VALID; +CREATE TABLE fk_partitioned_pk_3 PARTITION OF fk_partitioned_pk FOR VALUES FROM (2000,2000) TO (3000,3000) + PARTITION BY RANGE (a); +CREATE TABLE fk_partitioned_pk_3_1 PARTITION OF fk_partitioned_pk_3 FOR VALUES FROM (2000) TO (2100); +SELECT conname, conenforced, convalidated FROM pg_constraint +WHERE conrelid = 'fk_partitioned_pk'::regclass AND contype = 'f' +ORDER BY oid::regclass::text; +ALTER TABLE fk_partitioned_pk_2 VALIDATE CONSTRAINT selffk; +ALTER TABLE fk_partitioned_pk VALIDATE CONSTRAINT selffk; +SELECT conname, conenforced, convalidated FROM pg_constraint +WHERE conrelid = 'fk_partitioned_pk'::regclass AND contype = 'f' +ORDER BY oid::regclass::text; + DROP TABLE fk_notpartitioned_fk, fk_partitioned_pk; -- Test some other exotic foreign key features: MATCH SIMPLE, ON UPDATE/DELETE @@ -1515,6 +1538,7 @@ ALTER TABLE fk_partitioned_fk DETACH PARTITION fk_partitioned_fk_2; BEGIN; DROP TABLE fk_partitioned_fk; -- constraint should still be there +-- \d fk_partitioned_fk_2; ROLLBACK; ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_2 FOR VALUES IN (1500,1502); DROP TABLE fk_partitioned_fk_2; @@ -1523,6 +1547,7 @@ CREATE TABLE fk_partitioned_fk_2 (b int, c text, a int, ALTER TABLE fk_partitioned_fk_2 DROP COLUMN c; ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_2 FOR VALUES IN (1500,1502); -- should have only one constraint +-- \d fk_partitioned_fk_2 DROP TABLE fk_partitioned_fk_2; CREATE TABLE fk_partitioned_fk_2 (b int, a int, @@ -1534,11 +1559,13 @@ BEGIN; -- change child constraint ALTER TABLE fk_partitioned_fk_2 ALTER CONSTRAINT fk_part_con ENFORCED; ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_2 FOR VALUES IN (1500,1502); +-- \d fk_partitioned_fk_2 ROLLBACK; BEGIN; -- or change parent constraint ALTER TABLE fk_partitioned_fk ALTER CONSTRAINT fk_partitioned_fk_a_b_fkey NOT ENFORCED; ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_2 FOR VALUES IN (1500,1502); +-- \d fk_partitioned_fk_2 ROLLBACK; DROP TABLE fk_partitioned_fk_2; @@ -1550,7 +1577,10 @@ ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_4 FOR VALUES IN ALTER TABLE fk_partitioned_fk DETACH PARTITION fk_partitioned_fk_4; ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_4 FOR VALUES IN (3500,3502); -- should only have one constraint +-- \d fk_partitioned_fk_4 +-- \d fk_partitioned_fk_4_1 -- this one has an FK with mismatched properties +-- \d fk_partitioned_fk_4_2 CREATE TABLE fk_partitioned_fk_5 (a int, b int, FOREIGN KEY (a, b) REFERENCES fk_notpartitioned_pk(a, b) ON UPDATE CASCADE ON DELETE CASCADE DEFERRABLE, @@ -1563,10 +1593,12 @@ ALTER TABLE fk_partitioned_fk DETACH PARTITION fk_partitioned_fk_5; ALTER TABLE fk_partitioned_fk ATTACH PARTITION fk_partitioned_fk_5 FOR VALUES IN (4500); -- 
this one has two constraints, similar but not quite the one in the parent, -- so it gets a new one +-- \d fk_partitioned_fk_5 -- verify that it works to reattaching a child with multiple candidate -- constraints ALTER TABLE fk_partitioned_fk_5 DETACH PARTITION fk_partitioned_fk_5_1; ALTER TABLE fk_partitioned_fk_5 ATTACH PARTITION fk_partitioned_fk_5_1 FOR VALUES FROM (0) TO (10); +-- \d fk_partitioned_fk_5_1 -- verify that attaching a table checks that the existing data satisfies the -- constraint @@ -1727,18 +1759,23 @@ create schema fkpart0 create table fk_part_23_2 partition of fk_part_23 for values in (2); alter table fkpart0.fk_part add foreign key (a) references fkpart0.pkey; +-- \d fkpart0.fk_part_1 \\ -- should have only one FK alter table fkpart0.fk_part_1 drop constraint fk_part_1_a_fkey; +-- \d fkpart0.fk_part_23 \\ -- should have only one FK +-- \d fkpart0.fk_part_23_2 \\ -- should have only one FK alter table fkpart0.fk_part_23 drop constraint fk_part_23_a_fkey; alter table fkpart0.fk_part_23_2 drop constraint fk_part_23_a_fkey; create table fkpart0.fk_part_4 partition of fkpart0.fk_part for values in (4); +-- \d fkpart0.fk_part_4 alter table fkpart0.fk_part_4 drop constraint fk_part_a_fkey; create table fkpart0.fk_part_56 partition of fkpart0.fk_part for values in (5,6) partition by list (a); create table fkpart0.fk_part_56_5 partition of fkpart0.fk_part_56 for values in (5); +-- \d fkpart0.fk_part_56 alter table fkpart0.fk_part_56 drop constraint fk_part_a_fkey; alter table fkpart0.fk_part_56_5 drop constraint fk_part_a_fkey; @@ -2320,6 +2357,7 @@ INSERT INTO fk_p VALUES (1, 1); ALTER TABLE fk_r ATTACH PARTITION fk_r_1 FOR VALUES IN (1); ALTER TABLE fk_r ATTACH PARTITION fk_r_2 FOR VALUES IN (2); +-- \d fk_r_2 INSERT INTO fk_r VALUES (1, 1, 1); INSERT INTO fk_r VALUES (2, 2, 1); @@ -2327,6 +2365,7 @@ INSERT INTO fk_r VALUES (2, 2, 1); ALTER TABLE fk_r DETACH PARTITION fk_r_1; ALTER TABLE fk_r DETACH PARTITION fk_r_2; +-- \d fk_r_2 INSERT INTO fk_r_1 (id, p_id, p_jd) VALUES (2, 1, 2); -- should fail DELETE FROM fk_p; -- should fail @@ -2334,6 +2373,7 @@ DELETE FROM fk_p; -- should fail ALTER TABLE fk_r ATTACH PARTITION fk_r_1 FOR VALUES IN (1); ALTER TABLE fk_r ATTACH PARTITION fk_r_2 FOR VALUES IN (2); +-- \d fk_r_2 DELETE FROM fk_p; -- should fail @@ -2346,3 +2386,51 @@ SET client_min_messages TO warning; DROP SCHEMA fkpart12 CASCADE; RESET client_min_messages; RESET search_path; + +-- Exercise the column mapping code with foreign keys. In this test we'll +-- create a partitioned table which has a partition with a dropped column and +-- check to ensure that an UPDATE cascades the changes correctly to the +-- partitioned table. 
+CREATE SCHEMA fkpart13; +SET search_path TO fkpart13; + +CREATE TABLE fkpart13_t1 (a int PRIMARY KEY); + +CREATE TABLE fkpart13_t2 ( + part_id int PRIMARY KEY, + column_to_drop int, + FOREIGN KEY (part_id) REFERENCES fkpart13_t1 ON UPDATE CASCADE ON DELETE CASCADE +) PARTITION BY LIST (part_id); + +CREATE TABLE fkpart13_t2_p1 PARTITION OF fkpart13_t2 FOR VALUES IN (1); + +-- drop the column +ALTER TABLE fkpart13_t2 DROP COLUMN column_to_drop; + +-- create a new partition without the dropped column +CREATE TABLE fkpart13_t2_p2 PARTITION OF fkpart13_t2 FOR VALUES IN (2); + +CREATE TABLE fkpart13_t3 ( + a int NOT NULL, + FOREIGN KEY (a) + REFERENCES fkpart13_t2 + ON UPDATE CASCADE ON DELETE CASCADE +); + +INSERT INTO fkpart13_t1 (a) VALUES (1); +INSERT INTO fkpart13_t2 (part_id) VALUES (1); +INSERT INTO fkpart13_t3 (a) VALUES (1); + +-- Test a cascading update works correctly with with the dropped column +UPDATE fkpart13_t1 SET a = 2 WHERE a = 1; +SELECT tableoid::regclass,* FROM fkpart13_t2; +SELECT tableoid::regclass,* FROM fkpart13_t3; + +-- Exercise code in ExecGetTriggerResultRel() as there's been previous issues +-- with ResultRelInfos being returned with the incorrect ri_RootResultRelInfo +WITH cte AS ( + UPDATE fkpart13_t2_p1 SET part_id = part_id +) UPDATE fkpart13_t1 SET a = 2 WHERE a = 1; + +DROP SCHEMA fkpart13 CASCADE; +RESET search_path; diff --git a/crates/squawk_parser/tests/data/regression_suite/generated_stored.sql b/crates/squawk_parser/tests/data/regression_suite/generated_stored.sql index 70fe32b1..010512c3 100644 --- a/crates/squawk_parser/tests/data/regression_suite/generated_stored.sql +++ b/crates/squawk_parser/tests/data/regression_suite/generated_stored.sql @@ -12,6 +12,7 @@ SELECT table_name, column_name, column_default, is_nullable, is_generated, gener SELECT table_name, column_name, dependent_column FROM information_schema.column_column_usage WHERE table_schema = 'generated_stored_tests' ORDER BY 1, 2, 3; +-- \d gtest1 -- duplicate generated CREATE TABLE gtest_err_1 (a int PRIMARY KEY, b int GENERATED ALWAYS AS (a * 2) STORED GENERATED ALWAYS AS (a * 3) STORED); @@ -136,6 +137,7 @@ WITH foo AS (SELECT * FROM gtest1) SELECT * FROM foo; -- inheritance CREATE TABLE gtest1_1 () INHERITS (gtest1); SELECT * FROM gtest1_1; +-- \d gtest1_1 INSERT INTO gtest1_1 VALUES (4); SELECT * FROM gtest1_1; SELECT * FROM gtest1; @@ -152,6 +154,7 @@ CREATE TABLE gtestx (x int, b int DEFAULT 10) INHERITS (gtest1); -- error CREATE TABLE gtestx (x int, b int GENERATED ALWAYS AS IDENTITY) INHERITS (gtest1); -- error CREATE TABLE gtestx (x int, b int GENERATED ALWAYS AS (a * 22) VIRTUAL) INHERITS (gtest1); -- error CREATE TABLE gtestx (x int, b int GENERATED ALWAYS AS (a * 22) STORED) INHERITS (gtest1); -- ok, overrides parent +-- \d+ gtestx INSERT INTO gtestx (a, x) VALUES (11, 22); SELECT * FROM gtest1; SELECT * FROM gtestx; @@ -175,6 +178,7 @@ DROP TABLE gtesty; CREATE TABLE gtesty (x int, b int GENERATED ALWAYS AS (x * 22) STORED); CREATE TABLE gtest1_y () INHERITS (gtest1, gtesty); -- error CREATE TABLE gtest1_y (b int GENERATED ALWAYS AS (x + 1) STORED) INHERITS (gtest1, gtesty); -- ok +-- \d gtest1_y -- test correct handling of GENERATED column that's only in child CREATE TABLE gtestp (f1 int); @@ -206,8 +210,18 @@ COPY gtest1 TO stdout; COPY gtest1 (a, b) TO stdout; +COPY gtest1 FROM stdin; +-- 3 +-- 4 +-- \. 
-SELECT * FROM gtest1 ORDER BY a; +COPY gtest1 (a, b) FROM stdin; +-- +COPY gtest1 FROM stdin WHERE b <> 10; +-- +COPY gtest1 FROM stdin WHERE gtest1 IS NULL; +-- +-- SELECT * FROM gtest1 ORDER BY a; TRUNCATE gtest3; INSERT INTO gtest3 (a) VALUES (1), (2); @@ -216,8 +230,14 @@ COPY gtest3 TO stdout; COPY gtest3 (a, b) TO stdout; +COPY gtest3 FROM stdin; +-- 3 +-- 4 +-- \. -SELECT * FROM gtest3 ORDER BY a; +COPY gtest3 (a, b) FROM stdin; +-- +-- SELECT * FROM gtest3 ORDER BY a; -- null values CREATE TABLE gtest2 (a int PRIMARY KEY, b int GENERATED ALWAYS AS (NULL) STORED); @@ -258,6 +278,7 @@ CREATE TABLE gtest10 (a int PRIMARY KEY, b int, c int GENERATED ALWAYS AS (b * 2 ALTER TABLE gtest10 DROP COLUMN b; -- fails ALTER TABLE gtest10 DROP COLUMN b CASCADE; -- drops c too +-- \d gtest10 CREATE TABLE gtest10a (a int PRIMARY KEY, b int GENERATED ALWAYS AS (a * 2) STORED); ALTER TABLE gtest10a DROP COLUMN b; @@ -344,6 +365,7 @@ CREATE TABLE gtest22c (a int, b int GENERATED ALWAYS AS (a * 2) STORED); CREATE INDEX gtest22c_b_idx ON gtest22c (b); CREATE INDEX gtest22c_expr_idx ON gtest22c ((b * 3)); CREATE INDEX gtest22c_pred_idx ON gtest22c (a) WHERE b > 0; +-- \d gtest22c INSERT INTO gtest22c VALUES (1), (2), (3); SET enable_seqscan TO off; @@ -374,6 +396,7 @@ CREATE TABLE gtest23x (a int PRIMARY KEY, b int GENERATED ALWAYS AS (a * 2) STOR CREATE TABLE gtest23x (a int PRIMARY KEY, b int GENERATED ALWAYS AS (a * 2) STORED REFERENCES gtest23a (x) ON DELETE SET NULL); -- error CREATE TABLE gtest23b (a int PRIMARY KEY, b int GENERATED ALWAYS AS (a * 2) STORED REFERENCES gtest23a (x)); +-- \d gtest23b INSERT INTO gtest23b VALUES (1); -- ok INSERT INTO gtest23b VALUES (5); -- error @@ -448,6 +471,9 @@ ALTER TABLE gtest_parent ATTACH PARTITION gtest_child3 FOR VALUES FROM ('2016-09 DROP TABLE gtest_child3; CREATE TABLE gtest_child3 (f1 date NOT NULL, f2 bigint, f3 bigint GENERATED ALWAYS AS (f2 * 33) STORED); ALTER TABLE gtest_parent ATTACH PARTITION gtest_child3 FOR VALUES FROM ('2016-09-01') TO ('2016-10-01'); +-- \d gtest_child +-- \d gtest_child2 +-- \d gtest_child3 INSERT INTO gtest_parent (f1, f2) VALUES ('2016-07-15', 1); INSERT INTO gtest_parent (f1, f2) VALUES ('2016-07-15', 2); INSERT INTO gtest_parent (f1, f2) VALUES ('2016-08-15', 3); @@ -461,16 +487,27 @@ SELECT tableoid::regclass, * FROM gtest_parent ORDER BY 1, 2, 3; -- alter only parent's and one child's generation expression ALTER TABLE ONLY gtest_parent ALTER COLUMN f3 SET EXPRESSION AS (f2 * 4); ALTER TABLE gtest_child ALTER COLUMN f3 SET EXPRESSION AS (f2 * 10); +-- \d gtest_parent +-- \d gtest_child +-- \d gtest_child2 +-- \d gtest_child3 SELECT tableoid::regclass, * FROM gtest_parent ORDER BY 1, 2, 3; -- alter generation expression of parent and all its children altogether ALTER TABLE gtest_parent ALTER COLUMN f3 SET EXPRESSION AS (f2 * 2); +-- \d gtest_parent +-- \d gtest_child +-- \d gtest_child2 +-- \d gtest_child3 SELECT tableoid::regclass, * FROM gtest_parent ORDER BY 1, 2, 3; -- we leave these tables around for purposes of testing dump/reload/upgrade -- generated columns in partition key (not allowed) CREATE TABLE gtest_part_key (f1 date NOT NULL, f2 bigint, f3 bigint GENERATED ALWAYS AS (f2 * 2) STORED) PARTITION BY RANGE (f3); +CREATE TABLE gtest_part_key (f1 date NOT NULL, f2 bigint, f3 bigint GENERATED ALWAYS AS (f2 * 2) STORED) PARTITION BY RANGE ((f3)); CREATE TABLE gtest_part_key (f1 date NOT NULL, f2 bigint, f3 bigint GENERATED ALWAYS AS (f2 * 2) STORED) PARTITION BY RANGE ((f3 * 3)); +CREATE TABLE gtest_part_key 
(f1 date NOT NULL, f2 bigint, f3 bigint GENERATED ALWAYS AS (f2 * 2) STORED) PARTITION BY RANGE ((gtest_part_key)); +CREATE TABLE gtest_part_key (f1 date NOT NULL, f2 bigint, f3 bigint GENERATED ALWAYS AS (f2 * 2) STORED) PARTITION BY RANGE ((gtest_part_key is not null)); -- ALTER TABLE ... ADD COLUMN CREATE TABLE gtest25 (a int PRIMARY KEY); @@ -485,6 +522,7 @@ ALTER TABLE gtest25 ADD COLUMN d int DEFAULT 101; ALTER TABLE gtest25 ALTER COLUMN d SET DATA TYPE float8, ADD COLUMN y float8 GENERATED ALWAYS AS (d * 4) STORED; SELECT * FROM gtest25 ORDER BY a; +-- \d gtest25 -- ALTER TABLE ... ALTER COLUMN CREATE TABLE gtest27 ( @@ -495,6 +533,7 @@ CREATE TABLE gtest27 ( INSERT INTO gtest27 (a, b) VALUES (3, 7), (4, 11); ALTER TABLE gtest27 ALTER COLUMN a TYPE text; -- error ALTER TABLE gtest27 ALTER COLUMN x TYPE numeric; +-- \d gtest27 SELECT * FROM gtest27; ALTER TABLE gtest27 ALTER COLUMN x TYPE boolean USING x <> 0; -- error ALTER TABLE gtest27 ALTER COLUMN x DROP DEFAULT; -- error @@ -504,10 +543,12 @@ ALTER TABLE gtest27 ALTER COLUMN a TYPE bigint, ALTER COLUMN b TYPE bigint, ADD COLUMN x bigint GENERATED ALWAYS AS ((a + b) * 2) STORED; +-- \d gtest27 -- Ideally you could just do this, but not today (and should x change type?): ALTER TABLE gtest27 ALTER COLUMN a TYPE float8, ALTER COLUMN b TYPE float8; -- error +-- \d gtest27 SELECT * FROM gtest27; -- ALTER TABLE ... ALTER COLUMN ... DROP EXPRESSION @@ -517,6 +558,7 @@ CREATE TABLE gtest29 ( ); INSERT INTO gtest29 (a) VALUES (3), (4); SELECT * FROM gtest29; +-- \d gtest29 ALTER TABLE gtest29 ALTER COLUMN a SET EXPRESSION AS (a * 3); -- error ALTER TABLE gtest29 ALTER COLUMN a DROP EXPRESSION; -- error ALTER TABLE gtest29 ALTER COLUMN a DROP EXPRESSION IF EXISTS; -- notice @@ -524,14 +566,17 @@ ALTER TABLE gtest29 ALTER COLUMN a DROP EXPRESSION IF EXISTS; -- notice -- Change the expression ALTER TABLE gtest29 ALTER COLUMN b SET EXPRESSION AS (a * 3); SELECT * FROM gtest29; +-- \d gtest29 ALTER TABLE gtest29 ALTER COLUMN b DROP EXPRESSION; INSERT INTO gtest29 (a) VALUES (5); INSERT INTO gtest29 (a, b) VALUES (6, 66); SELECT * FROM gtest29; +-- \d gtest29 -- check that dependencies between columns have also been removed ALTER TABLE gtest29 DROP COLUMN a; -- should not drop b +-- \d gtest29 -- with inheritance CREATE TABLE gtest30 ( @@ -540,6 +585,8 @@ CREATE TABLE gtest30 ( ); CREATE TABLE gtest30_1 () INHERITS (gtest30); ALTER TABLE gtest30 ALTER COLUMN b DROP EXPRESSION; +-- \d gtest30 +-- \d gtest30_1 DROP TABLE gtest30 CASCADE; CREATE TABLE gtest30 ( a int, @@ -547,12 +594,27 @@ CREATE TABLE gtest30 ( ); CREATE TABLE gtest30_1 () INHERITS (gtest30); ALTER TABLE ONLY gtest30 ALTER COLUMN b DROP EXPRESSION; -- error +-- \d gtest30 +-- \d gtest30_1 ALTER TABLE gtest30_1 ALTER COLUMN b DROP EXPRESSION; -- error -- composite type dependencies CREATE TABLE gtest31_1 (a int, b text GENERATED ALWAYS AS ('hello') STORED, c text); CREATE TABLE gtest31_2 (x int, y gtest31_1); ALTER TABLE gtest31_1 ALTER COLUMN b TYPE varchar; -- fails + +-- bug #18970: these cases are unsupported, but make sure they fail cleanly +ALTER TABLE gtest31_2 ADD CONSTRAINT cc CHECK ((y).b IS NOT NULL); +ALTER TABLE gtest31_1 ALTER COLUMN b SET EXPRESSION AS ('hello1'); +ALTER TABLE gtest31_2 DROP CONSTRAINT cc; + +CREATE STATISTICS gtest31_2_stat ON ((y).b is not null) FROM gtest31_2; +ALTER TABLE gtest31_1 ALTER COLUMN b SET EXPRESSION AS ('hello2'); +DROP STATISTICS gtest31_2_stat; + +CREATE INDEX gtest31_2_y_idx ON gtest31_2(((y).b)); +ALTER TABLE gtest31_1 ALTER 
COLUMN b SET EXPRESSION AS ('hello3'); + DROP TABLE gtest31_1, gtest31_2; -- Check it for a partitioned table, too @@ -687,6 +749,7 @@ ALTER TABLE gtest28a DROP COLUMN a; CREATE TABLE gtest28b (LIKE gtest28a INCLUDING GENERATED); +-- \d gtest28* -- sanity check of system catalog diff --git a/crates/squawk_parser/tests/data/regression_suite/generated_virtual.sql b/crates/squawk_parser/tests/data/regression_suite/generated_virtual.sql index 5c3f4131..79bc9fb8 100644 --- a/crates/squawk_parser/tests/data/regression_suite/generated_virtual.sql +++ b/crates/squawk_parser/tests/data/regression_suite/generated_virtual.sql @@ -12,6 +12,7 @@ SELECT table_name, column_name, column_default, is_nullable, is_generated, gener SELECT table_name, column_name, dependent_column FROM information_schema.column_column_usage WHERE table_schema = 'generated_virtual_tests' ORDER BY 1, 2, 3; +-- \d gtest1 -- duplicate generated CREATE TABLE gtest_err_1 (a int PRIMARY KEY, b int GENERATED ALWAYS AS (a * 2) VIRTUAL GENERATED ALWAYS AS (a * 3) VIRTUAL); @@ -136,6 +137,7 @@ WITH foo AS (SELECT * FROM gtest1) SELECT * FROM foo; -- inheritance CREATE TABLE gtest1_1 () INHERITS (gtest1); SELECT * FROM gtest1_1; +-- \d gtest1_1 INSERT INTO gtest1_1 VALUES (4); SELECT * FROM gtest1_1; SELECT * FROM gtest1; @@ -152,6 +154,7 @@ CREATE TABLE gtestx (x int, b int DEFAULT 10) INHERITS (gtest1); -- error CREATE TABLE gtestx (x int, b int GENERATED ALWAYS AS IDENTITY) INHERITS (gtest1); -- error CREATE TABLE gtestx (x int, b int GENERATED ALWAYS AS (a * 22) STORED) INHERITS (gtest1); -- error CREATE TABLE gtestx (x int, b int GENERATED ALWAYS AS (a * 22) VIRTUAL) INHERITS (gtest1); -- ok, overrides parent +-- \d+ gtestx INSERT INTO gtestx (a, x) VALUES (11, 22); SELECT * FROM gtest1; SELECT * FROM gtestx; @@ -175,6 +178,7 @@ DROP TABLE gtesty; CREATE TABLE gtesty (x int, b int GENERATED ALWAYS AS (x * 22) VIRTUAL); CREATE TABLE gtest1_y () INHERITS (gtest1, gtesty); -- error CREATE TABLE gtest1_y (b int GENERATED ALWAYS AS (x + 1) VIRTUAL) INHERITS (gtest1, gtesty); -- ok +-- \d gtest1_y -- test correct handling of GENERATED column that's only in child CREATE TABLE gtestp (f1 int); @@ -206,8 +210,18 @@ COPY gtest1 TO stdout; COPY gtest1 (a, b) TO stdout; +COPY gtest1 FROM stdin; +-- 3 +-- 4 +-- \. -SELECT * FROM gtest1 ORDER BY a; +COPY gtest1 (a, b) FROM stdin; +-- +COPY gtest1 FROM stdin WHERE b <> 10; +-- +COPY gtest1 FROM stdin WHERE gtest1 IS NULL; +-- +-- SELECT * FROM gtest1 ORDER BY a; TRUNCATE gtest3; INSERT INTO gtest3 (a) VALUES (1), (2); @@ -216,8 +230,14 @@ COPY gtest3 TO stdout; COPY gtest3 (a, b) TO stdout; +COPY gtest3 FROM stdin; +-- 3 +-- 4 +-- \. 
-SELECT * FROM gtest3 ORDER BY a; +COPY gtest3 (a, b) FROM stdin; +-- +-- SELECT * FROM gtest3 ORDER BY a; -- null values CREATE TABLE gtest2 (a int PRIMARY KEY, b int GENERATED ALWAYS AS (NULL) VIRTUAL); @@ -237,10 +257,10 @@ CREATE TABLE gtest4 ( a int, b double_int GENERATED ALWAYS AS ((a * 2, a * 3)) VIRTUAL ); -INSERT INTO gtest4 VALUES (1), (6); -SELECT * FROM gtest4; +--INSERT INTO gtest4 VALUES (1), (6); +--SELECT * FROM gtest4; -DROP TABLE gtest4; +--DROP TABLE gtest4; DROP TYPE double_int; -- using tableoid is allowed @@ -258,6 +278,7 @@ CREATE TABLE gtest10 (a int PRIMARY KEY, b int, c int GENERATED ALWAYS AS (b * 2 ALTER TABLE gtest10 DROP COLUMN b; -- fails ALTER TABLE gtest10 DROP COLUMN b CASCADE; -- drops c too +-- \d gtest10 CREATE TABLE gtest10a (a int PRIMARY KEY, b int GENERATED ALWAYS AS (a * 2) VIRTUAL); ALTER TABLE gtest10a DROP COLUMN b; @@ -273,20 +294,21 @@ GRANT SELECT (a, c) ON gtest11 TO regress_user11; CREATE FUNCTION gf1(a int) RETURNS int AS $$ SELECT a * 3 $$ IMMUTABLE LANGUAGE SQL; REVOKE ALL ON FUNCTION gf1(int) FROM PUBLIC; -CREATE TABLE gtest12 (a int PRIMARY KEY, b int, c int GENERATED ALWAYS AS (gf1(b)) VIRTUAL); -INSERT INTO gtest12 VALUES (1, 10), (2, 20); -GRANT SELECT (a, c), INSERT ON gtest12 TO regress_user11; +CREATE TABLE gtest12 (a int PRIMARY KEY, b int, c int GENERATED ALWAYS AS (gf1(b)) VIRTUAL); -- fails, user-defined function +--INSERT INTO gtest12 VALUES (1, 10), (2, 20); +--GRANT SELECT (a, c), INSERT ON gtest12 TO regress_user11; SET ROLE regress_user11; SELECT a, b FROM gtest11; -- not allowed SELECT a, c FROM gtest11; -- allowed SELECT gf1(10); -- not allowed -INSERT INTO gtest12 VALUES (3, 30), (4, 40); -- allowed (does not actually invoke the function) -SELECT a, c FROM gtest12; -- currently not allowed because of function permissions, should arguably be allowed +--INSERT INTO gtest12 VALUES (3, 30), (4, 40); -- allowed (does not actually invoke the function) +--SELECT a, c FROM gtest12; -- currently not allowed because of function permissions, should arguably be allowed RESET ROLE; -DROP FUNCTION gf1(int); -- fail -DROP TABLE gtest11, gtest12; +--DROP FUNCTION gf1(int); -- fail +DROP TABLE gtest11; +--DROP TABLE gtest12; DROP FUNCTION gf1(int); DROP USER regress_user11; @@ -436,11 +458,19 @@ CREATE TABLE gtest24r (a int PRIMARY KEY, b gtestdomain1range GENERATED ALWAYS A --INSERT INTO gtest24r (a) VALUES (4); -- ok --INSERT INTO gtest24r (a) VALUES (6); -- error +CREATE TABLE gtest24at (a int PRIMARY KEY); +ALTER TABLE gtest24at ADD COLUMN b gtestdomain1 GENERATED ALWAYS AS (a * 2) VIRTUAL; -- error +CREATE TABLE gtest24ata (a int PRIMARY KEY, b int GENERATED ALWAYS AS (a * 2) VIRTUAL); +ALTER TABLE gtest24ata ALTER COLUMN b TYPE gtestdomain1; -- error + CREATE DOMAIN gtestdomainnn AS int CHECK (VALUE IS NOT NULL); CREATE TABLE gtest24nn (a int, b gtestdomainnn GENERATED ALWAYS AS (a * 2) VIRTUAL); --INSERT INTO gtest24nn (a) VALUES (4); -- ok --INSERT INTO gtest24nn (a) VALUES (NULL); -- error +-- using user-defined type not yet supported +CREATE TABLE gtest24xxx (a gtestdomain1, b gtestdomain1, c int GENERATED ALWAYS AS (greatest(a, b)) VIRTUAL); -- error + -- typed tables (currently not supported) CREATE TYPE gtest_type AS (f1 integer, f2 text, f3 bigint); CREATE TABLE gtest28 OF gtest_type (f1 WITH OPTIONS GENERATED ALWAYS AS (f2 *2) VIRTUAL); @@ -484,6 +514,9 @@ ALTER TABLE gtest_parent ATTACH PARTITION gtest_child3 FOR VALUES FROM ('2016-09 DROP TABLE gtest_child3; CREATE TABLE gtest_child3 (f1 date NOT NULL, f2 bigint, f3 
bigint GENERATED ALWAYS AS (f2 * 33) VIRTUAL); ALTER TABLE gtest_parent ATTACH PARTITION gtest_child3 FOR VALUES FROM ('2016-09-01') TO ('2016-10-01'); +-- \d gtest_child +-- \d gtest_child2 +-- \d gtest_child3 INSERT INTO gtest_parent (f1, f2) VALUES ('2016-07-15', 1); INSERT INTO gtest_parent (f1, f2) VALUES ('2016-07-15', 2); INSERT INTO gtest_parent (f1, f2) VALUES ('2016-08-15', 3); @@ -497,16 +530,27 @@ SELECT tableoid::regclass, * FROM gtest_parent ORDER BY 1, 2, 3; -- alter only parent's and one child's generation expression ALTER TABLE ONLY gtest_parent ALTER COLUMN f3 SET EXPRESSION AS (f2 * 4); ALTER TABLE gtest_child ALTER COLUMN f3 SET EXPRESSION AS (f2 * 10); +-- \d gtest_parent +-- \d gtest_child +-- \d gtest_child2 +-- \d gtest_child3 SELECT tableoid::regclass, * FROM gtest_parent ORDER BY 1, 2, 3; -- alter generation expression of parent and all its children altogether ALTER TABLE gtest_parent ALTER COLUMN f3 SET EXPRESSION AS (f2 * 2); +-- \d gtest_parent +-- \d gtest_child +-- \d gtest_child2 +-- \d gtest_child3 SELECT tableoid::regclass, * FROM gtest_parent ORDER BY 1, 2, 3; -- we leave these tables around for purposes of testing dump/reload/upgrade -- generated columns in partition key (not allowed) CREATE TABLE gtest_part_key (f1 date NOT NULL, f2 bigint, f3 bigint GENERATED ALWAYS AS (f2 * 2) VIRTUAL) PARTITION BY RANGE (f3); +CREATE TABLE gtest_part_key (f1 date NOT NULL, f2 bigint, f3 bigint GENERATED ALWAYS AS (f2 * 2) VIRTUAL) PARTITION BY RANGE ((f3)); CREATE TABLE gtest_part_key (f1 date NOT NULL, f2 bigint, f3 bigint GENERATED ALWAYS AS (f2 * 2) VIRTUAL) PARTITION BY RANGE ((f3 * 3)); +CREATE TABLE gtest_part_key (f1 date NOT NULL, f2 bigint, f3 bigint GENERATED ALWAYS AS (f2 * 2) VIRTUAL) PARTITION BY RANGE ((gtest_part_key)); +CREATE TABLE gtest_part_key (f1 date NOT NULL, f2 bigint, f3 bigint GENERATED ALWAYS AS (f2 * 2) VIRTUAL) PARTITION BY RANGE ((gtest_part_key is not null)); -- ALTER TABLE ... ADD COLUMN CREATE TABLE gtest25 (a int PRIMARY KEY); @@ -521,6 +565,7 @@ ALTER TABLE gtest25 ADD COLUMN d int DEFAULT 101; ALTER TABLE gtest25 ALTER COLUMN d SET DATA TYPE float8, ADD COLUMN y float8 GENERATED ALWAYS AS (d * 4) VIRTUAL; SELECT * FROM gtest25 ORDER BY a; +-- \d gtest25 -- ALTER TABLE ... ALTER COLUMN CREATE TABLE gtest27 ( @@ -531,6 +576,7 @@ CREATE TABLE gtest27 ( INSERT INTO gtest27 (a, b) VALUES (3, 7), (4, 11); ALTER TABLE gtest27 ALTER COLUMN a TYPE text; -- error ALTER TABLE gtest27 ALTER COLUMN x TYPE numeric; +-- \d gtest27 SELECT * FROM gtest27; ALTER TABLE gtest27 ALTER COLUMN x TYPE boolean USING x <> 0; -- error ALTER TABLE gtest27 ALTER COLUMN x DROP DEFAULT; -- error @@ -548,10 +594,12 @@ ALTER TABLE gtest27 ALTER COLUMN a TYPE bigint, ALTER COLUMN b TYPE bigint, ADD COLUMN x bigint GENERATED ALWAYS AS ((a + b) * 2) VIRTUAL; +-- \d gtest27 -- Ideally you could just do this, but not today (and should x change type?): ALTER TABLE gtest27 ALTER COLUMN a TYPE float8, ALTER COLUMN b TYPE float8; -- error +-- \d gtest27 SELECT * FROM gtest27; -- ALTER TABLE ... ALTER COLUMN ... 
DROP EXPRESSION @@ -561,6 +609,7 @@ CREATE TABLE gtest29 ( ); INSERT INTO gtest29 (a) VALUES (3), (4); SELECT * FROM gtest29; +-- \d gtest29 ALTER TABLE gtest29 ALTER COLUMN a SET EXPRESSION AS (a * 3); -- error ALTER TABLE gtest29 ALTER COLUMN a DROP EXPRESSION; -- error ALTER TABLE gtest29 ALTER COLUMN a DROP EXPRESSION IF EXISTS; -- notice @@ -568,11 +617,13 @@ ALTER TABLE gtest29 ALTER COLUMN a DROP EXPRESSION IF EXISTS; -- notice -- Change the expression ALTER TABLE gtest29 ALTER COLUMN b SET EXPRESSION AS (a * 3); SELECT * FROM gtest29; +-- \d gtest29 ALTER TABLE gtest29 ALTER COLUMN b DROP EXPRESSION; -- not supported INSERT INTO gtest29 (a) VALUES (5); INSERT INTO gtest29 (a, b) VALUES (6, 66); SELECT * FROM gtest29; +-- \d gtest29 -- check that dependencies between columns have also been removed --ALTER TABLE gtest29 DROP COLUMN a; -- should not drop b @@ -585,6 +636,8 @@ CREATE TABLE gtest30 ( ); CREATE TABLE gtest30_1 () INHERITS (gtest30); ALTER TABLE gtest30 ALTER COLUMN b DROP EXPRESSION; +-- \d gtest30 +-- \d gtest30_1 DROP TABLE gtest30 CASCADE; CREATE TABLE gtest30 ( a int, @@ -592,12 +645,27 @@ CREATE TABLE gtest30 ( ); CREATE TABLE gtest30_1 () INHERITS (gtest30); ALTER TABLE ONLY gtest30 ALTER COLUMN b DROP EXPRESSION; -- error +-- \d gtest30 +-- \d gtest30_1 ALTER TABLE gtest30_1 ALTER COLUMN b DROP EXPRESSION; -- error -- composite type dependencies CREATE TABLE gtest31_1 (a int, b text GENERATED ALWAYS AS ('hello') VIRTUAL, c text); CREATE TABLE gtest31_2 (x int, y gtest31_1); ALTER TABLE gtest31_1 ALTER COLUMN b TYPE varchar; -- fails + +-- bug #18970 +ALTER TABLE gtest31_2 ADD CONSTRAINT cc CHECK ((y).b IS NOT NULL); +ALTER TABLE gtest31_1 ALTER COLUMN b SET EXPRESSION AS ('hello1'); +ALTER TABLE gtest31_2 DROP CONSTRAINT cc; + +CREATE STATISTICS gtest31_2_stat ON ((y).b is not null) FROM gtest31_2; +ALTER TABLE gtest31_1 ALTER COLUMN b SET EXPRESSION AS ('hello2'); +DROP STATISTICS gtest31_2_stat; + +CREATE INDEX gtest31_2_y_idx ON gtest31_2(((y).b)); +ALTER TABLE gtest31_1 ALTER COLUMN b SET EXPRESSION AS ('hello3'); + DROP TABLE gtest31_1, gtest31_2; -- Check it for a partitioned table, too @@ -732,6 +800,7 @@ ALTER TABLE gtest28a DROP COLUMN a; CREATE TABLE gtest28b (LIKE gtest28a INCLUDING GENERATED); +-- \d gtest28* -- sanity check of system catalog @@ -748,7 +817,8 @@ create table gtest32 ( a int primary key, b int generated always as (a * 2), c int generated always as (10 + 10), - d int generated always as (coalesce(a, 100)) + d int generated always as (coalesce(a, 100)), + e int ); insert into gtest32 values (1), (2); @@ -789,7 +859,34 @@ select t2.* from gtest32 t1 left join gtest32 t2 on false; select t2.* from gtest32 t1 left join gtest32 t2 on false; explain (verbose, costs off) -select * from gtest32 t group by grouping sets (a, b, c, d) having c = 20; -select * from gtest32 t group by grouping sets (a, b, c, d) having c = 20; +select * from gtest32 t group by grouping sets (a, b, c, d, e) having c = 20; +select * from gtest32 t group by grouping sets (a, b, c, d, e) having c = 20; + +-- Ensure that the virtual generated columns in ALTER COLUMN TYPE USING expression are expanded +alter table gtest32 alter column e type bigint using b; + +-- Ensure that virtual generated column references within SubLinks that should +-- be transformed into joins can get expanded +explain (costs off) +select 1 from gtest32 t1 where exists + (select 1 from gtest32 t2 where t1.a > t2.a and t2.b = 2); + +select 1 from gtest32 t1 where exists + (select 1 from gtest32 t2 
where t1.a > t2.a and t2.b = 2); drop table gtest32; + +-- Ensure that virtual generated columns in constraint expressions are expanded +create table gtest33 (a int, b int generated always as (a * 2) virtual not null, check (b > 10)); +set constraint_exclusion to on; + +-- should get a dummy Result, not a seq scan +explain (costs off) +select * from gtest33 where b < 10; + +-- should get a dummy Result, not a seq scan +explain (costs off) +select * from gtest33 where b is null; + +reset constraint_exclusion; +drop table gtest33; diff --git a/crates/squawk_parser/tests/data/regression_suite/groupingsets.sql b/crates/squawk_parser/tests/data/regression_suite/groupingsets.sql index 21b6d1dc..3f87e47d 100644 --- a/crates/squawk_parser/tests/data/regression_suite/groupingsets.sql +++ b/crates/squawk_parser/tests/data/regression_suite/groupingsets.sql @@ -22,11 +22,13 @@ copy gstest2 from stdin; -- 1 1 2 2 2 2 2 2 -- 1 2 2 2 2 2 2 2 -- 2 2 2 2 2 2 2 2 +-- \. create temp table gstest3 (a integer, b integer, c integer, d integer); copy gstest3 from stdin; -- 1 1 1 1 -- 2 2 2 2 +-- \. alter table gstest3 add primary key (a); create temp table gstest4(id integer, v integer, @@ -288,11 +290,29 @@ explain (costs off) select v.c, (select count(*) from gstest2 group by () having v.c) from (values (false),(true)) v(c) order by v.c; --- test pushdown of HAVING clause that does not reference any columns that are nullable by grouping sets +-- test pushdown of non-degenerate HAVING clause that does not reference any +-- columns that are nullable by grouping sets explain (costs off) select a, b, count(*) from gstest2 group by grouping sets ((a, b), (a)) having a > 1 and b > 1; select a, b, count(*) from gstest2 group by grouping sets ((a, b), (a)) having a > 1 and b > 1; +explain (costs off) +select a, b, count(*) from gstest2 group by rollup(a), b having b > 1; +select a, b, count(*) from gstest2 group by rollup(a), b having b > 1; + +-- test pushdown of degenerate HAVING clause +explain (costs off) +select count(*) from gstest2 group by grouping sets (()) having false; +select count(*) from gstest2 group by grouping sets (()) having false; + +explain (costs off) +select a, count(*) from gstest2 group by grouping sets ((a), ()) having false; +select a, count(*) from gstest2 group by grouping sets ((a), ()) having false; + +explain (costs off) +select a, b, count(*) from gstest2 group by grouping sets ((a), (b)) having false; +select a, b, count(*) from gstest2 group by grouping sets ((a), (b)) having false; + -- HAVING with GROUPING queries select ten, grouping(ten) from onek group by grouping sets(ten) having grouping(ten) >= 0 diff --git a/crates/squawk_parser/tests/data/regression_suite/guc.sql b/crates/squawk_parser/tests/data/regression_suite/guc.sql index f65f84a2..bafaf067 100644 --- a/crates/squawk_parser/tests/data/regression_suite/guc.sql +++ b/crates/squawk_parser/tests/data/regression_suite/guc.sql @@ -12,6 +12,15 @@ SHOW vacuum_cost_delay; SHOW datestyle; SELECT '2006-08-13 12:34:56'::timestamptz; +-- Check handling of list GUCs +SET search_path = 'pg_catalog', Foo, 'Bar', ''; +SHOW search_path; +SET search_path = null; -- means empty list +SHOW search_path; +SET search_path = null, null; -- syntax error +SET enable_seqscan = null; -- error +RESET search_path; + -- SET LOCAL has no effect outside of a transaction SET LOCAL vacuum_cost_delay TO 50; SHOW vacuum_cost_delay; diff --git a/crates/squawk_parser/tests/data/regression_suite/hash_index.sql 
b/crates/squawk_parser/tests/data/regression_suite/hash_index.sql index a944204a..769fbebb 100644 --- a/crates/squawk_parser/tests/data/regression_suite/hash_index.sql +++ b/crates/squawk_parser/tests/data/regression_suite/hash_index.sql @@ -3,6 +3,7 @@ -- -- directory paths are passed to us in environment variables +-- \getenv abs_srcdir PG_ABS_SRCDIR CREATE TABLE hash_i4_heap ( seqno int4, @@ -24,6 +25,7 @@ CREATE TABLE hash_f8_heap ( random float8 ); +-- \set filename :abs_srcdir '/data/hash.data' COPY hash_i4_heap FROM 'filename'; COPY hash_name_heap FROM 'filename'; COPY hash_txt_heap FROM 'filename'; @@ -34,8 +36,8 @@ COPY hash_f8_heap FROM 'filename'; -- this is therefore a stress test of the bucket overflow code (unlike -- the data in hash.data, which has unique index keys). -- --- \set filename 'abs_srcdir' '/data/hashovfl.data' --- COPY hash_ovfl_heap FROM 'filename'; +-- \set filename :abs_srcdir '/data/hashovfl.data' +-- COPY hash_ovfl_heap FROM :'filename'; ANALYZE hash_i4_heap; ANALYZE hash_name_heap; @@ -51,6 +53,9 @@ CREATE INDEX hash_txt_index ON hash_txt_heap USING hash (random text_ops); CREATE INDEX hash_f8_index ON hash_f8_heap USING hash (random float8_ops) WITH (fillfactor=60); +CREATE INDEX hash_i4_partial_index ON hash_i4_heap USING hash (seqno) + WHERE seqno = 9999; + -- -- Also try building functional, expressional, and partial indexes on -- tables that already contain data. @@ -115,6 +120,16 @@ SELECT * FROM hash_f8_heap SELECT * FROM hash_f8_heap WHERE hash_f8_heap.random = '88888888'::float8; +-- +-- partial hash index +-- +EXPLAIN (COSTS OFF) +SELECT * FROM hash_i4_heap + WHERE seqno = 9999; + +SELECT * FROM hash_i4_heap + WHERE seqno = 9999; + -- -- hash index -- grep '^90[^0-9]' hashovfl.data diff --git a/crates/squawk_parser/tests/data/regression_suite/horology.sql b/crates/squawk_parser/tests/data/regression_suite/horology.sql index 1310b432..8978249a 100644 --- a/crates/squawk_parser/tests/data/regression_suite/horology.sql +++ b/crates/squawk_parser/tests/data/regression_suite/horology.sql @@ -102,6 +102,10 @@ SELECT date 'J J 1520447'; SELECT timestamp with time zone 'Y2001M12D27H04M05S06.789+08'; SELECT timestamp with time zone 'Y2001M12D27H04MM05S06.789-08'; +-- More examples we used to accept and should not +SELECT timestamp with time zone 'J2452271 T X03456-08'; +SELECT timestamp with time zone 'J2452271 T X03456.001e6-08'; + -- conflicting fields should throw errors SELECT date '1995-08-06 epoch'; SELECT date '1995-08-06 infinity'; diff --git a/crates/squawk_parser/tests/data/regression_suite/identity.sql b/crates/squawk_parser/tests/data/regression_suite/identity.sql index 4b656e6d..d2c5f60a 100644 --- a/crates/squawk_parser/tests/data/regression_suite/identity.sql +++ b/crates/squawk_parser/tests/data/regression_suite/identity.sql @@ -14,6 +14,7 @@ SELECT sequence_name FROM information_schema.sequences WHERE sequence_name LIKE SELECT pg_get_serial_sequence('itest1', 'a'); +-- \d itest1_a_seq CREATE TABLE itest4 (a int, b text); ALTER TABLE itest4 ALTER COLUMN a ADD GENERATED ALWAYS AS IDENTITY; -- error, requires NOT NULL @@ -130,7 +131,15 @@ SELECT * FROM itest2; CREATE TABLE itest9 (a int GENERATED ALWAYS AS IDENTITY, b text, c bigint); +COPY itest9 FROM stdin; +-- 100 foo 200 +-- 101 bar 201 +-- \. +COPY itest9 (b, c) FROM stdin; +-- foo2 202 +-- bar2 203 +-- \. 
SELECT * FROM itest9 ORDER BY c; @@ -202,6 +211,7 @@ ALTER TABLE itest5 ALTER COLUMN a ADD GENERATED ALWAYS AS IDENTITY; ALTER TABLE itest3 ALTER COLUMN a TYPE int; SELECT seqtypid::regtype FROM pg_sequence WHERE seqrelid = 'itest3_a_seq'::regclass; +-- \d itest3 ALTER TABLE itest3 ALTER COLUMN a TYPE text; -- error @@ -209,15 +219,25 @@ ALTER TABLE itest3 ALTER COLUMN a TYPE text; -- error CREATE UNLOGGED TABLE itest17 (a int NOT NULL, b text); ALTER TABLE itest17 ALTER COLUMN a ADD GENERATED ALWAYS AS IDENTITY; ALTER TABLE itest17 ADD COLUMN c int GENERATED ALWAYS AS IDENTITY; +-- \d itest17 +-- \d itest17_a_seq +-- \d itest17_c_seq CREATE TABLE itest18 (a int NOT NULL, b text); ALTER TABLE itest18 SET UNLOGGED, ALTER COLUMN a ADD GENERATED ALWAYS AS IDENTITY; +-- \d itest18 +-- \d itest18_a_seq ALTER TABLE itest18 SET LOGGED; +-- \d itest18 +-- \d itest18_a_seq ALTER TABLE itest18 SET UNLOGGED; +-- \d itest18 +-- \d itest18_a_seq -- kinda silly to change property in the same command, but it should work ALTER TABLE itest3 ADD COLUMN c int GENERATED BY DEFAULT AS IDENTITY, ALTER COLUMN c SET GENERATED ALWAYS; +-- \d itest3 -- ALTER COLUMN ... SET @@ -315,6 +335,11 @@ ALTER TABLE itest8 ALTER COLUMN f22 ADD GENERATED ALWAYS AS IDENTITY; TABLE itest8; +-- \d+ itest8 +-- \d itest8_f2_seq +-- \d itest8_f3_seq +-- \d itest8_f4_seq +-- \d itest8_f5_seq DROP TABLE itest8; diff --git a/crates/squawk_parser/tests/data/regression_suite/incremental_sort.sql b/crates/squawk_parser/tests/data/regression_suite/incremental_sort.sql index f1f8fae5..bbe658a7 100644 --- a/crates/squawk_parser/tests/data/regression_suite/incremental_sort.sql +++ b/crates/squawk_parser/tests/data/regression_suite/incremental_sort.sql @@ -298,3 +298,27 @@ explain (costs off) select * from (select * from tenk1 order by four) t1 join tenk1 t2 on t1.four = t2.four and t1.two = t2.two order by t1.four, t1.two limit 1; + +-- +-- Test incremental sort for Append/MergeAppend +-- +create table prt_tbl (a int, b int) partition by range (a); +create table prt_tbl_1 partition of prt_tbl for values from (0) to (100); +create table prt_tbl_2 partition of prt_tbl for values from (100) to (200); +insert into prt_tbl select i%200, i from generate_series(1,1000)i; +create index on prt_tbl_1(a); +create index on prt_tbl_2(a, b); +analyze prt_tbl; + +set enable_seqscan to off; +set enable_bitmapscan to off; + +-- Ensure we get an incremental sort for the subpath of Append +explain (costs off) select * from prt_tbl order by a, b; + +-- Ensure we get an incremental sort for the subpath of MergeAppend +explain (costs off) select * from prt_tbl_1 union all select * from prt_tbl_2 order by a, b; + +reset enable_bitmapscan; +reset enable_seqscan; +drop table prt_tbl; diff --git a/crates/squawk_parser/tests/data/regression_suite/index_including.sql b/crates/squawk_parser/tests/data/regression_suite/index_including.sql index 965407d5..43c1cacb 100644 --- a/crates/squawk_parser/tests/data/regression_suite/index_including.sql +++ b/crates/squawk_parser/tests/data/regression_suite/index_including.sql @@ -14,6 +14,7 @@ CREATE INDEX ON tbl_include_reg (c1, c2) INCLUDE (c1, c3); SELECT pg_get_indexdef(i.indexrelid) FROM pg_index i JOIN pg_class c ON i.indexrelid = c.oid WHERE i.indrelid = 'tbl_include_reg'::regclass ORDER BY c.relname; +-- \d tbl_include_reg_idx -- Unique index and unique constraint CREATE TABLE tbl_include_unique1 (c1 int, c2 int, c3 int, c4 box); @@ -214,6 +215,7 @@ CREATE TABLE tbl (c1 int,c2 int, c3 int, c4 box, UNIQUE(c1, c2) 
INCLUDE(c3,c4)); INSERT INTO tbl SELECT x, 2*x, 3*x, box('4,4,4,4') FROM generate_series(1,10) AS x; ALTER TABLE tbl ALTER c1 TYPE bigint; ALTER TABLE tbl ALTER c3 TYPE bigint; +-- \d tbl DROP TABLE tbl; /* diff --git a/crates/squawk_parser/tests/data/regression_suite/index_including_gist.sql b/crates/squawk_parser/tests/data/regression_suite/index_including_gist.sql index bcd014a3..0a9ea7d7 100644 --- a/crates/squawk_parser/tests/data/regression_suite/index_including_gist.sql +++ b/crates/squawk_parser/tests/data/regression_suite/index_including_gist.sql @@ -76,6 +76,7 @@ INSERT INTO tbl_gist SELECT x, 2*x, 3*x, box(point(x,x+1),point(2*x,2*x+1)) FROM CREATE INDEX tbl_gist_idx ON tbl_gist using gist (c4) INCLUDE (c1,c3); ALTER TABLE tbl_gist ALTER c1 TYPE bigint; ALTER TABLE tbl_gist ALTER c3 TYPE bigint; +-- \d tbl_gist DROP TABLE tbl_gist; /* @@ -85,4 +86,5 @@ CREATE TABLE tbl_gist (c1 int, c2 int, c3 int, c4 box, EXCLUDE USING gist (c4 WI INSERT INTO tbl_gist SELECT x, 2*x, 3*x, box(point(x,x+1),point(2*x,2*x+1)) FROM generate_series(1,10) AS x; INSERT INTO tbl_gist SELECT x, 2*x, 3*x, box(point(3*x,2*x),point(3*x+1,2*x+1)) FROM generate_series(1,10) AS x; EXPLAIN (costs off) SELECT * FROM tbl_gist where c4 <@ box(point(1,1),point(10,10)); +-- \d tbl_gist DROP TABLE tbl_gist; diff --git a/crates/squawk_parser/tests/data/regression_suite/indexing.sql b/crates/squawk_parser/tests/data/regression_suite/indexing.sql index 107a7fa7..466655f5 100644 --- a/crates/squawk_parser/tests/data/regression_suite/indexing.sql +++ b/crates/squawk_parser/tests/data/regression_suite/indexing.sql @@ -58,7 +58,11 @@ create table idxpart (a int, b int, c text) partition by range (a); create index idxparti on idxpart (a); create index idxparti2 on idxpart (b, c); create table idxpart1 (like idxpart); +-- \d idxpart1 alter table idxpart attach partition idxpart1 for values from (0) to (10); +-- \d idxpart1 +-- \d+ idxpart1_a_idx +-- \d+ idxpart1_b_c_idx -- Forbid ALTER TABLE when attaching or detaching an index to a partition. create index idxpart_c on only idxpart (c); @@ -76,6 +80,7 @@ create table idxpart (a int, b int) partition by range (a, b); create table idxpart1 partition of idxpart for values from (0, 0) to (10, 10); create index on idxpart1 (a, b); create index on idxpart (a, b); +-- \d idxpart1 select relname, relkind, relhassubclass, inhparent::regclass from pg_class left join pg_index ix on (indexrelid = oid) left join pg_inherits on (ix.indexrelid = inhrelid) @@ -146,6 +151,7 @@ create index on idxpart1 ((a + 0)); create index on idxpart1 (a, a); create index on idxpart (a); alter table idxpart attach partition idxpart1 for values from (0) to (1000); +-- \d idxpart1 drop table idxpart; -- If CREATE INDEX ONLY, don't create indexes on partitions; and existing @@ -162,6 +168,9 @@ create index on only idxpart2 (a); create index on idxpart (a); -- Here we expect that idxpart1 and idxpart2 have a new index, but idxpart21 -- does not; also, idxpart22 is not attached. +-- \d idxpart1 +-- \d idxpart2 +-- \d idxpart21 select indexrelid::regclass, indrelid::regclass, inhparent::regclass from pg_index idx left join pg_inherits inh on (idx.indexrelid = inh.inhrelid) where indexrelid::regclass::text like 'idxpart%' @@ -173,9 +182,11 @@ where indexrelid::regclass::text like 'idxpart%' order by indexrelid::regclass::text collate "C"; -- attaching idxpart22 is not enough to set idxpart22_a_idx valid ... alter index idxpart2_a_idx attach partition idxpart22_a_idx; +-- \d idxpart2 -- ... but this one is. 
create index on idxpart21 (a); alter index idxpart2_a_idx attach partition idxpart21_a_idx; +-- \d idxpart2 drop table idxpart; -- When a table is attached a partition and it already has an index, a @@ -185,22 +196,26 @@ create table idxpart (a int, b int, c text, d bool) partition by range (a); create index idxparti on idxpart (a); create index idxparti2 on idxpart (b, c); create table idxpart1 (like idxpart including indexes); +-- \d idxpart1 select relname, relkind, inhparent::regclass from pg_class left join pg_index ix on (indexrelid = oid) left join pg_inherits on (ix.indexrelid = inhrelid) where relname like 'idxpart%' order by relname; alter table idxpart attach partition idxpart1 for values from (0) to (10); +-- \d idxpart1 select relname, relkind, inhparent::regclass from pg_class left join pg_index ix on (indexrelid = oid) left join pg_inherits on (ix.indexrelid = inhrelid) where relname like 'idxpart%' order by relname; -- While here, also check matching when creating an index after the fact. create index on idxpart1 ((a+b)) where d = true; +-- \d idxpart1 select relname, relkind, inhparent::regclass from pg_class left join pg_index ix on (indexrelid = oid) left join pg_inherits on (ix.indexrelid = inhrelid) where relname like 'idxpart%' order by relname; create index idxparti3 on idxpart ((a+b)) where d = true; +-- \d idxpart1 select relname, relkind, inhparent::regclass from pg_class left join pg_index ix on (indexrelid = oid) left join pg_inherits on (ix.indexrelid = inhrelid) @@ -275,7 +290,9 @@ create index on idxpart(c); create table idxpart1 partition of idxpart for values from (0) to (250); create table idxpart2 partition of idxpart for values from (250) to (500); alter table idxpart detach partition idxpart2; +-- \d idxpart2 alter table idxpart2 drop column c; +-- \d idxpart2 drop table idxpart, idxpart2; -- Verify that expression indexes inherit correctly @@ -417,6 +434,8 @@ create index on idxpart1 (col_keep); create table idxpart (col_keep int) partition by range (col_keep); create index on idxpart (col_keep); alter table idxpart attach partition idxpart1 for values from (0) to (1000); +-- \d idxpart +-- \d idxpart1 select attrelid::regclass, attname, attnum from pg_attribute where attrelid::regclass::text like 'idxpart%' and attnum > 0 order by attrelid::regclass, attnum; @@ -431,6 +450,8 @@ create table idxpart1 (col_keep int); create index on idxpart1 (col_keep); create index on idxpart (col_keep); alter table idxpart attach partition idxpart1 for values from (0) to (1000); +-- \d idxpart +-- \d idxpart1 select attrelid::regclass, attname, attnum from pg_attribute where attrelid::regclass::text like 'idxpart%' and attnum > 0 order by attrelid::regclass, attnum; @@ -442,12 +463,14 @@ drop table idxpart; -- Verify that it works to add primary key / unique to partitioned tables create table idxpart (a int primary key, b int) partition by range (a); +-- \d idxpart -- multiple primary key on child should fail create table failpart partition of idxpart (b primary key) for values from (0) to (100); drop table idxpart; -- primary key on child is okay if there's no PK in the parent, though create table idxpart (a int) partition by range (a); create table idxpart1pk partition of idxpart (a primary key) for values from (0) to (100); +-- \d idxpart1pk drop table idxpart; -- Failing to use the full partition key is not allowed @@ -490,13 +513,16 @@ create table idxpart (a int unique, b int) partition by range ((b + a)); create table idxpart (a int, b int, c text) partition by 
range (a, b); alter table idxpart add primary key (a); -- not an incomplete one though alter table idxpart add primary key (a, b); -- this works +-- \d idxpart create table idxpart1 partition of idxpart for values from (0, 0) to (1000, 1000); +-- \d idxpart1 drop table idxpart; -- use ALTER TABLE to add a unique constraint create table idxpart (a int, b int) partition by range (a, b); alter table idxpart add unique (a); -- not an incomplete one though alter table idxpart add unique (b, a); -- this works +-- \d idxpart drop table idxpart; -- Exclusion constraints can be added if partitioning by their equal column @@ -796,6 +822,10 @@ create index on parted_index_col_drop (b); create index on parted_index_col_drop (c); create index on parted_index_col_drop (b, c); alter table parted_index_col_drop drop column c; +-- \d parted_index_col_drop +-- \d parted_index_col_drop1 +-- \d parted_index_col_drop2 +-- \d parted_index_col_drop11 drop table parted_index_col_drop; -- Check that invalid indexes are not selected when attaching a partition. @@ -893,7 +923,7 @@ drop table parted_replica_tab; create table test_pg_index_toast_table (a int); create or replace function test_pg_index_toast_func (a int, b int[]) returns bool as $$ select true $$ language sql immutable; -select array_agg(n) b from generate_series(1, 10000) n ; +select array_agg(n) b from generate_series(1, 10000) n /* \gset */; create index concurrently test_pg_index_toast_index on test_pg_index_toast_table (test_pg_index_toast_func(a, 'b')); reindex index concurrently test_pg_index_toast_index; diff --git a/crates/squawk_parser/tests/data/regression_suite/indirect_toast.sql b/crates/squawk_parser/tests/data/regression_suite/indirect_toast.sql index 71b2258b..d2e4d1f4 100644 --- a/crates/squawk_parser/tests/data/regression_suite/indirect_toast.sql +++ b/crates/squawk_parser/tests/data/regression_suite/indirect_toast.sql @@ -3,7 +3,10 @@ -- -- directory paths and dlsuffix are passed to us in environment variables +-- \getenv libdir PG_LIBDIR +-- \getenv dlsuffix PG_DLSUFFIX +-- \set regresslib :libdir '/regress' :dlsuffix CREATE FUNCTION make_tuple_indirect (record) RETURNS record diff --git a/crates/squawk_parser/tests/data/regression_suite/infinite_recurse.sql b/crates/squawk_parser/tests/data/regression_suite/infinite_recurse.sql index 5afec587..f0823d33 100644 --- a/crates/squawk_parser/tests/data/regression_suite/infinite_recurse.sql +++ b/crates/squawk_parser/tests/data/regression_suite/infinite_recurse.sql @@ -14,11 +14,16 @@ create function infinite_recurse() returns int as -- difference in the end state of the regression database.) SELECT version() ~ 'powerpc64[^,]*-linux-gnu' - AS skip_test ; + AS skip_test /* \gset */; +-- \if :skip_test +-- \quit +-- \endif -- The full error report is not very stable, so we show only SQLSTATE -- and primary error message. 
+-- \set VERBOSITY sqlstate select infinite_recurse(); +-- \echo :LAST_ERROR_MESSAGE diff --git a/crates/squawk_parser/tests/data/regression_suite/inherit.sql b/crates/squawk_parser/tests/data/regression_suite/inherit.sql index e55fae84..c1c150a5 100644 --- a/crates/squawk_parser/tests/data/regression_suite/inherit.sql +++ b/crates/squawk_parser/tests/data/regression_suite/inherit.sql @@ -248,6 +248,8 @@ select pc.relname, pgc.conname, pgc.contype, pgc.conislocal, pgc.coninhcount, pg -- Test that child does not inherit NO INHERIT constraints create table c1 () inherits (p1); +-- \d p1 +-- \d c1 -- Test that child does not override inheritable constraints of the parent create table c2 (constraint p2chk check (ff1 > 10) no inherit) inherits (p1); --fails @@ -344,15 +346,20 @@ insert into c1 values(1,1,2); alter table p2 add check (f2>0); insert into c1 values(1,-1,2); -- fail create table c2(f3 int) inherits(p1,p2); +-- \d c2 create table c3 (f4 int) inherits(c1,c2); +-- \d c3 drop table p1 cascade; drop table p2 cascade; create table pp1 (f1 int); create table cc1 (f2 text, f3 int) inherits (pp1); alter table pp1 add column a1 int check (a1 > 0); +-- \d cc1 create table cc2(f4 float) inherits(pp1,cc1); +-- \d cc2 alter table pp1 add column a2 int check (a2 > 0); +-- \d cc2 drop table pp1 cascade; -- Test for renaming in simple multiple inheritance @@ -364,6 +371,7 @@ ALTER TABLE inht1 RENAME a TO aa; ALTER TABLE inht1 RENAME b TO bb; -- to be failed ALTER TABLE inhts RENAME aa TO aaa; -- to be failed ALTER TABLE inhts RENAME d TO dd; +-- \d+ inhts DROP TABLE inhts; @@ -373,6 +381,8 @@ CREATE TABLE inhtb () INHERITS (inhta); CREATE TABLE inhtc () INHERITS (inhtb); CREATE TABLE inhtd () INHERITS (inhta, inhtb, inhtc); ALTER TABLE inhta ADD COLUMN i int, ADD COLUMN j bigint DEFAULT 1; +-- \d+ inhta +-- \d+ inhtd DROP TABLE inhta, inhtb, inhtc, inhtd; -- Test for renaming in diamond inheritance @@ -381,10 +391,12 @@ CREATE TABLE inht3 (y int) INHERITS (inht1); CREATE TABLE inht4 (z int) INHERITS (inht2, inht3); ALTER TABLE inht1 RENAME aa TO aaa; +-- \d+ inht4 CREATE TABLE inhts (d int) INHERITS (inht2, inhs1); ALTER TABLE inht1 RENAME aaa TO aaaa; ALTER TABLE inht1 RENAME b TO bb; -- to be failed +-- \d+ inhts WITH RECURSIVE r AS ( SELECT 'inht1'::regclass AS inhrelid @@ -403,7 +415,10 @@ DROP TABLE inht1, inhs1 CASCADE; -- Test non-inheritable indices [UNIQUE, EXCLUDE] constraints CREATE TABLE test_constraints (id int, val1 varchar, val2 int, UNIQUE(val1, val2)); CREATE TABLE test_constraints_inh () INHERITS (test_constraints); +-- \d+ test_constraints ALTER TABLE ONLY test_constraints DROP CONSTRAINT test_constraints_val1_val2_key; +-- \d+ test_constraints +-- \d+ test_constraints_inh DROP TABLE test_constraints_inh; DROP TABLE test_constraints; @@ -412,7 +427,10 @@ CREATE TABLE test_ex_constraints ( EXCLUDE USING gist (c WITH &&) ); CREATE TABLE test_ex_constraints_inh () INHERITS (test_ex_constraints); +-- \d+ test_ex_constraints ALTER TABLE test_ex_constraints DROP CONSTRAINT test_ex_constraints_c_excl; +-- \d+ test_ex_constraints +-- \d+ test_ex_constraints_inh DROP TABLE test_ex_constraints_inh; DROP TABLE test_ex_constraints; @@ -420,7 +438,11 @@ DROP TABLE test_ex_constraints; CREATE TABLE test_primary_constraints(id int PRIMARY KEY); CREATE TABLE test_foreign_constraints(id1 int REFERENCES test_primary_constraints(id)); CREATE TABLE test_foreign_constraints_inh () INHERITS (test_foreign_constraints); +-- \d+ test_primary_constraints +-- \d+ test_foreign_constraints ALTER TABLE 
test_foreign_constraints DROP CONSTRAINT test_foreign_constraints_id1_fkey; +-- \d+ test_foreign_constraints +-- \d+ test_foreign_constraints_inh DROP TABLE test_foreign_constraints_inh; DROP TABLE test_foreign_constraints; DROP TABLE test_primary_constraints; @@ -512,6 +534,7 @@ create table p1(f1 int constraint f1_pos CHECK (f1 > 0)); create table p1_c1 (f1 int constraint f1_pos CHECK (f1 > 0)) inherits (p1); alter table p1_c1 drop constraint f1_pos; alter table p1 drop constraint f1_pos; +-- \d p1_c1 drop table p1 cascade; create table p1(f1 int constraint f1_pos CHECK (f1 > 0)); @@ -520,13 +543,16 @@ create table p1p2_c1 (f1 int) inherits (p1, p2); create table p1p2_c2 (f1 int constraint f1_pos CHECK (f1 > 0)) inherits (p1, p2); alter table p2 drop constraint f1_pos; alter table p1 drop constraint f1_pos; +-- \d p1p2_c* drop table p1, p2 cascade; create table p1(f1 int constraint f1_pos CHECK (f1 > 0)); create table p1_c1() inherits (p1); create table p1_c2() inherits (p1); create table p1_c1c2() inherits (p1_c1, p1_c2); +-- \d p1_c1c2 alter table p1 drop constraint f1_pos; +-- \d p1_c1c2 drop table p1 cascade; create table p1(f1 int constraint f1_pos CHECK (f1 > 0)); @@ -537,6 +563,7 @@ alter table p1_c2 drop constraint f1_pos; alter table p1 drop constraint f1_pos; alter table p1_c1c2 drop constraint f1_pos; alter table p1_c2 drop constraint f1_pos; +-- \d p1_c1c2 drop table p1 cascade; -- Test that a valid child can have not-valid parent, but not vice versa @@ -804,14 +831,21 @@ create table cc1 (f2 text, f3 int) inherits (pp1); create table cc2 (f4 float) inherits (pp1,cc1); create table cc3 () inherits (pp1,cc1,cc2); alter table pp1 alter f1 set not null; +-- \d+ cc3 alter table cc3 no inherit pp1; alter table cc3 no inherit cc1; alter table cc3 no inherit cc2; +-- \d+ cc3 drop table cc3; -- named NOT NULL constraint alter table cc1 add column a2 int constraint nn not null; +-- \d+ cc1 +-- \d+ cc2 alter table pp1 alter column f1 set not null; +-- \d+ pp1 +-- \d+ cc1 +-- \d+ cc2 -- cannot create table with inconsistent NO INHERIT constraint create table cc3 (a2 int not null no inherit) inherits (cc1); @@ -824,15 +858,18 @@ alter table cc2 alter column a2 drop not null; -- remove constraint from cc1, should succeed alter table cc1 alter column a2 drop not null; +-- \d+ cc1 -- same for cc2 alter table cc2 alter column f1 drop not null; +-- \d+ cc2 -- remove from cc1, should fail again alter table cc1 alter column f1 drop not null; -- remove from pp1, should succeed alter table pp1 alter column f1 drop not null; +-- \d+ pp1 alter table pp1 add primary key (f1); -- Leave these tables around, for pg_upgrade testing @@ -842,6 +879,7 @@ create table inh_parent (f1 int not null no inherit, f2 int not null no inherit) create table inh_child (f1 int not null no inherit, f2 int); alter table inh_child inherit inh_parent; alter table inh_child no inherit inh_parent; +-- \d+ inh_child drop table inh_parent, inh_child; -- test that inhcount is updated correctly through multiple inheritance @@ -851,12 +889,14 @@ create table inh_cc2(f4 float) inherits(inh_pp1,inh_cc1); alter table inh_pp1 alter column f1 set not null; alter table inh_cc2 no inherit inh_pp1; alter table inh_cc2 no inherit inh_cc1; +-- \d+ inh_cc2 drop table inh_pp1, inh_cc1, inh_cc2; create table inh_pp1 (f1 int not null); create table inh_cc1 (f2 text, f3 int) inherits (inh_pp1); create table inh_cc2(f4 float) inherits(inh_pp1,inh_cc1); alter table inh_pp1 alter column f1 drop not null; +-- \d+ inh_cc2 drop table inh_pp1, 
inh_cc1, inh_cc2; @@ -871,9 +911,11 @@ drop table inh_parent, inh_child, inh_grandchild; create table inh_parent1(a int constraint nn not null); create table inh_parent2(b int constraint nn not null); create table inh_child1 () inherits (inh_parent1, inh_parent2); +-- \d+ inh_child1 create table inh_child2 (constraint foo not null a) inherits (inh_parent1, inh_parent2); alter table inh_child2 no inherit inh_parent2; +-- \d+ inh_child2 drop table inh_parent1, inh_parent2, inh_child1, inh_child2; @@ -886,6 +928,7 @@ select conrelid::regclass, conname, contype, conkey, from pg_constraint where contype in ('n','p') and conrelid::regclass::text in ('inh_child', 'inh_parent1', 'inh_parent2') order by 1, 2; +-- \d+ inh_child drop table inh_parent1, inh_parent2, inh_child; -- NOT NULL NO INHERIT @@ -899,6 +942,7 @@ select conrelid::regclass, conname, contype, conkey, from pg_constraint where contype = 'n' and conrelid::regclass::text like 'inh\_nn\_%' order by 2, 1; +-- \d+ inh_nn* drop table inh_nn_parent, inh_nn_child, inh_nn_child2; CREATE TABLE inh_nn_parent (a int, NOT NULL a NO INHERIT); @@ -942,6 +986,9 @@ alter table inh_child2 inherit inh_child1; -- add NOT NULL constraint recursively alter table inh_parent alter column f1 set not null; +-- \d+ inh_parent +-- \d+ inh_child1 +-- \d+ inh_child2 select conrelid::regclass, conname, contype, coninhcount, conislocal from pg_constraint where contype = 'n' and @@ -955,6 +1002,9 @@ select conrelid::regclass, conname, contype, coninhcount, conislocal -- deinherit inh_child1 create table inh_child3 () inherits (inh_child1); alter table inh_child1 no inherit inh_parent; +-- \d+ inh_parent +-- \d+ inh_child1 +-- \d+ inh_child2 select conrelid::regclass, conname, contype, coninhcount, conislocal from pg_constraint where contype = 'n' and conrelid::regclass::text in ('inh_parent', 'inh_child1', 'inh_child2', 'inh_child3') diff --git a/crates/squawk_parser/tests/data/regression_suite/insert.sql b/crates/squawk_parser/tests/data/regression_suite/insert.sql index 9a11182a..6696202b 100644 --- a/crates/squawk_parser/tests/data/regression_suite/insert.sql +++ b/crates/squawk_parser/tests/data/regression_suite/insert.sql @@ -101,6 +101,7 @@ create rule irule2 as on insert to inserttest2 do also create rule irule3 as on insert to inserttest2 do also insert into inserttest (f4[1].if1, f4[1].if2[2]) select new.f1, new.f2; +-- \d+ inserttest2 drop table inserttest2; drop table inserttest; @@ -153,6 +154,7 @@ create rule irule2 as on insert to inserttest2 do also create rule irule3 as on insert to inserttest2 do also insert into inserttestb (f4[1].if1, f4[1].if2[2]) select new.f1, new.f2; +-- \d+ inserttest2 drop table inserttest2; drop table inserttesta; @@ -348,6 +350,7 @@ from hash_parted order by part; -- test \d+ output on a table which has both partitioned and unpartitioned -- partitions +-- \d+ list_parted -- cleanup drop table range_parted, list_parted; @@ -357,6 +360,7 @@ drop table hash_parted; -- including null create table list_parted (a int) partition by list (a); create table part_default partition of list_parted default; +-- \d+ part_default insert into part_default values (null); insert into part_default values (1); insert into part_default values (-1); @@ -621,9 +625,10 @@ create trigger donothingbrtrig2 before insert on donothingbrtrig_test2 for each alter table donothingbrtrig_test attach partition donothingbrtrig_test1 for values in (1); alter table donothingbrtrig_test attach partition donothingbrtrig_test2 for values in (2); insert into 
donothingbrtrig_test values (1, 'foo'), (2, 'bar'); --- copy donothingbrtrig_test from stdout; +copy donothingbrtrig_test from stdout; -- 1 baz -- 2 qux +-- \. select tableoid::regclass, * from donothingbrtrig_test; -- cleanup @@ -641,6 +646,15 @@ create table mcrparted6_common_ge_10 partition of mcrparted for values from ('co create table mcrparted7_gt_common_lt_d partition of mcrparted for values from ('common', maxvalue) to ('d', minvalue); create table mcrparted8_ge_d partition of mcrparted for values from ('d', minvalue) to (maxvalue, maxvalue); +-- \d+ mcrparted +-- \d+ mcrparted1_lt_b +-- \d+ mcrparted2_b +-- \d+ mcrparted3_c_to_common +-- \d+ mcrparted4_common_lt_0 +-- \d+ mcrparted5_common_0_to_10 +-- \d+ mcrparted6_common_ge_10 +-- \d+ mcrparted7_gt_common_lt_d +-- \d+ mcrparted8_ge_d insert into mcrparted values ('aaa', 0), ('b', 0), ('bz', 10), ('c', -10), ('comm', -10), ('common', -10), ('common', 0), ('common', 10), diff --git a/crates/squawk_parser/tests/data/regression_suite/insert_conflict.sql b/crates/squawk_parser/tests/data/regression_suite/insert_conflict.sql index 549c4645..2699dcc3 100644 --- a/crates/squawk_parser/tests/data/regression_suite/insert_conflict.sql +++ b/crates/squawk_parser/tests/data/regression_suite/insert_conflict.sql @@ -3,6 +3,11 @@ -- create table insertconflicttest(key int4, fruit text); +-- invalid clauses +-- insert into insertconflicttest values (1) on conflict (key int4_ops (fillfactor=10)) do nothing; +-- insert into insertconflicttest values (1) on conflict (key asc) do nothing; +-- insert into insertconflicttest values (1) on conflict (key nulls last) do nothing; + -- These things should work through a view, as well create view insertconflictview as select * from insertconflicttest; diff --git a/crates/squawk_parser/tests/data/regression_suite/interval.sql b/crates/squawk_parser/tests/data/regression_suite/interval.sql index cca07fc6..f939e76d 100644 --- a/crates/squawk_parser/tests/data/regression_suite/interval.sql +++ b/crates/squawk_parser/tests/data/regression_suite/interval.sql @@ -124,6 +124,16 @@ DROP TABLE INTERVAL_TBL_OF; -- stored internally. CREATE TABLE INTERVAL_MULDIV_TBL (span interval); +COPY INTERVAL_MULDIV_TBL FROM STDIN; +-- 41 mon 12 days 360:00 +-- -41 mon -12 days +360:00 +-- -12 days +-- 9 mon -27 days 12:34:56 +-- -3 years 482 days 76:54:32.189 +-- 4 mon +-- 14 mon +-- 999 mon 999 days +-- \. 
SELECT span * 0.3 AS product FROM INTERVAL_MULDIV_TBL; diff --git a/crates/squawk_parser/tests/data/regression_suite/join.sql b/crates/squawk_parser/tests/data/regression_suite/join.sql index cc5128ad..7ec84f3b 100644 --- a/crates/squawk_parser/tests/data/regression_suite/join.sql +++ b/crates/squawk_parser/tests/data/regression_suite/join.sql @@ -759,6 +759,26 @@ select * from tbl_rs t1 join (select t1.a+t3.a from tbl_rs t3) and t2.a < 5) on true; +-- +-- regression test for bug with parallel-hash-right-semi join +-- + +begin; + +-- encourage use of parallel plans +set local parallel_setup_cost=0; +set local parallel_tuple_cost=0; +set local min_parallel_table_scan_size=0; +set local max_parallel_workers_per_gather=4; + +-- ensure we don't get parallel hash right semi join +explain (costs off) +select * from tenk1 t1 +where exists (select 1 from tenk1 t2 where fivethous = t1.fivethous) +and t1.fivethous < 5; + +rollback; + -- -- regression test for bug #13908 (hash join with skew tuples & nbatch increase) -- @@ -839,6 +859,13 @@ explain (costs off) select a.* from tenk1 a left join tenk1 b on a.unique1 = b.unique2 where b.unique2 is null; +-- check that we avoid de-duplicating columns redundantly +set enable_memoize to off; +explain (costs off) +select 1 from tenk1 +where (hundred, thousand) in (select twothousand, twothousand from onek); +reset enable_memoize; + -- -- regression test for bogus RTE_GROUP entries -- @@ -1277,6 +1304,23 @@ where t1.unique2 < 42 and t1.stringu1 > t2.stringu2; -- variant that isn't quite a star-schema case +explain (verbose, costs off) +select ss1.d1 from + tenk1 as t1 + inner join tenk1 as t2 + on t1.tenthous = t2.ten + inner join + int8_tbl as i8 + left join int4_tbl as i4 + inner join (select 64::information_schema.cardinal_number as d1 + from tenk1 t3, + lateral (select abs(t3.unique1) + random()) ss0(x) + where t3.fivethous < 0) as ss1 + on i4.f1 = ss1.d1 + on i8.q1 = i4.f1 + on t1.tenthous = ss1.d1 +where t1.unique1 < i4.f1; + select ss1.d1 from tenk1 as t1 inner join tenk1 as t2 @@ -1332,6 +1376,64 @@ select * from (select 1 as x) ss1 left join (select 2 as y) ss2 on (true), lateral (select ss2.y as z limit 1) ss3; +-- This example demonstrates the folly of our old "have_dangerous_phv" logic +begin; +set local from_collapse_limit to 2; +explain (verbose, costs off) +select * from int8_tbl t1 + left join + (select coalesce(t2.q1 + x, 0) from int8_tbl t2, + lateral (select t3.q1 as x from int8_tbl t3, + lateral (select t2.q1, t3.q1 offset 0) s)) + on true; +rollback; + +-- ... 
not that the initial replacement didn't have some bugs too +begin; +create temp table t(i int primary key); + +explain (verbose, costs off) +select * from t t1 + left join (select 1 as x, * from t t2(i2)) t2ss on t1.i = t2ss.i2 + left join t t3(i3) on false + left join t t4(i4) on t4.i4 > t2ss.x; + +explain (verbose, costs off) +select * from + (select k from + (select i, coalesce(i, j) as k from + (select i from t union all select 0) + join (select 1 as j limit 1) on i = j) + right join (select 2 as x) on true + join (select 3 as y) on i is not null + ), + lateral (select k as kl limit 1); + +rollback; + +-- PHVs containing SubLinks are quite tricky to get right +explain (verbose, costs off) +select * +from int8_tbl i8 + inner join + (select (select true) as x + from int4_tbl i4, lateral (select i4.f1 as y limit 1) ss1 + where i4.f1 = 0) ss2 on true + right join (select false as z) ss3 on true, + lateral (select i8.q2 as q2l where x limit 1) ss4 +where i8.q2 = 123; + +explain (verbose, costs off) +select * +from int8_tbl i8 + inner join + (select (select true) as x + from int4_tbl i4, lateral (select 1 as y limit 1) ss1 + where i4.f1 = 0) ss2 on true + right join (select false as z) ss3 on true, + lateral (select i8.q2 as q2l where x limit 1) ss4 +where i8.q2 = 123; + -- Test proper handling of appendrel PHVs during useless-RTE removal explain (costs off) select * from @@ -1902,13 +2004,13 @@ select * from (select 1 as id) as xx left join (tenk1 as a1 full join (select 1 as id) as yy on (a1.unique1 = yy.id)) - on (xx.id = coalesce(yy.id)); + on (xx.id = coalesce(yy.id, yy.id)); select * from (select 1 as id) as xx left join (tenk1 as a1 full join (select 1 as id) as yy on (a1.unique1 = yy.id)) - on (xx.id = coalesce(yy.id)); + on (xx.id = coalesce(yy.id, yy.id)); -- -- test ability to push constants through outer join clauses @@ -2130,6 +2232,24 @@ explain (costs off) select d.* from d left join (select * from b group by b.id, b.c_id) s on d.a = s.id and d.b = s.c_id; +-- check that join removal works for a left join when joining a subquery +-- that is guaranteed to be unique by GROUPING SETS +explain (costs off) +select d.* from d left join (select 1 as x from b group by ()) s + on d.a = s.x; + +explain (costs off) +select d.* from d left join (select 1 as x from b group by grouping sets(())) s + on d.a = s.x; + +explain (costs off) +select d.* from d left join (select 1 as x from b group by grouping sets(()), grouping sets(())) s + on d.a = s.x; + +explain (costs off) +select d.* from d left join (select 1 as x from b group by distinct grouping sets((), ())) s + on d.a = s.x; + -- similarly, but keying off a DISTINCT clause explain (costs off) select d.* from d left join (select distinct * from b) s @@ -2143,6 +2263,20 @@ explain (costs off) select d.* from d left join (select * from b group by b.id, b.c_id) s on d.a = s.id; +-- join removal is not possible when the GROUP BY contains non-empty grouping +-- sets or multiple empty grouping sets +explain (costs off) +select d.* from d left join (select 1 as x from b group by rollup(x)) s + on d.a = s.x; + +explain (costs off) +select d.* from d left join (select 1 as x from b group by grouping sets((), ())) s + on d.a = s.x; + +explain (costs off) +select d.* from d left join (select 1 as x from b group by grouping sets((), grouping sets(()))) s + on d.a = s.x; + -- similarly, but keying off a DISTINCT clause explain (costs off) select d.* from d left join (select distinct * from b) s @@ -2345,6 +2479,69 @@ where t1.a = s.c; rollback; +-- 
check handling of semijoins after join removal: we must suppress +-- unique-ification of known-constant values +begin; + +create temp table t (a int unique, b int); +insert into t values (1, 2); + +explain (verbose, costs off) +select t1.a from t t1 + left join t t2 on t1.a = t2.a + join t t3 on true +where exists (select 1 from t t4 + join t t5 on t4.b = t5.b + join t t6 on t5.b = t6.b + where t1.a = t4.a and t3.a = t5.a and t4.a = 1); + +select t1.a from t t1 + left join t t2 on t1.a = t2.a + join t t3 on true +where exists (select 1 from t t4 + join t t5 on t4.b = t5.b + join t t6 on t5.b = t6.b + where t1.a = t4.a and t3.a = t5.a and t4.a = 1); + +rollback; + +-- check handling of semijoins if all RHS columns are equated to constants: we +-- should suppress unique-ification in this case. +begin; + +create temp table t (a int, b int); +insert into t values (1, 2); + +explain (costs off) +select * from t t1, t t2 where exists + (select 1 from t t3 where t1.a = t3.a and t2.b = t3.b and t3.a = 1 and t3.b = 2); + +select * from t t1, t t2 where exists + (select 1 from t t3 where t1.a = t3.a and t2.b = t3.b and t3.a = 1 and t3.b = 2); + +rollback; + +-- check handling of semijoin unique-ification for child relations if all RHS +-- columns are equated to constants. +begin; + +create temp table p (a int, b int) partition by range (a); +create temp table p1 partition of p for values from (0) to (10); +create temp table p2 partition of p for values from (10) to (20); +insert into p values (1, 2); +insert into p values (10, 20); + +set enable_partitionwise_join to on; + +explain (costs off) +select * from p t1 where exists + (select 1 from p t2 where t1.a = t2.a and t1.a = 1); + +select * from p t1 where exists + (select 1 from p t2 where t1.a = t2.a and t1.a = 1); + +rollback; + -- test cases where we can remove a join, but not a PHV computed at it begin; @@ -3094,9 +3291,9 @@ select * from int4_tbl i left join lateral (select * from int2_tbl j where i.f1 = j.f1) k on true; explain (verbose, costs off) select * from int4_tbl i left join - lateral (select coalesce(i) from int2_tbl j where i.f1 = j.f1) k on true; + lateral (select coalesce(i, i) from int2_tbl j where i.f1 = j.f1) k on true; select * from int4_tbl i left join - lateral (select coalesce(i) from int2_tbl j where i.f1 = j.f1) k on true; + lateral (select coalesce(i, i) from int2_tbl j where i.f1 = j.f1) k on true; explain (verbose, costs off) select * from int4_tbl a, lateral ( @@ -3434,7 +3631,7 @@ explain (verbose, costs off) select * from j1 left join j2 on j1.id1 = j2.id1 where j1.id2 = 1; -create unique index j1_id2_idx on j1(id2) where id2 is not null; +create unique index j1_id2_idx on j1(id2) where id2 > 0; -- ensure we don't use a partial unique index as unique proofs explain (verbose, costs off) @@ -3562,7 +3759,7 @@ ANALYZE group_tbl; EXPLAIN (COSTS OFF) SELECT 1 FROM group_tbl t1 - LEFT JOIN (SELECT a c1, COALESCE(a) c2 FROM group_tbl t2) s ON TRUE + LEFT JOIN (SELECT a c1, COALESCE(a, a) c2 FROM group_tbl t2) s ON TRUE GROUP BY s.c1, s.c2; DROP TABLE group_tbl; diff --git a/crates/squawk_parser/tests/data/regression_suite/json_encoding.sql b/crates/squawk_parser/tests/data/regression_suite/json_encoding.sql index 4f039d0e..ae761117 100644 --- a/crates/squawk_parser/tests/data/regression_suite/json_encoding.sql +++ b/crates/squawk_parser/tests/data/regression_suite/json_encoding.sql @@ -5,7 +5,10 @@ -- We provide expected-results files for UTF8 (json_encoding.out) -- and for SQL_ASCII (json_encoding_1.out). Skip otherwise. 
SELECT getdatabaseencoding() NOT IN ('UTF8', 'SQL_ASCII') - AS skip_test ; + AS skip_test /* \gset */; +-- \if :skip_test +-- \quit +-- \endif SELECT getdatabaseencoding(); -- just to label the results files diff --git a/crates/squawk_parser/tests/data/regression_suite/jsonb.sql b/crates/squawk_parser/tests/data/regression_suite/jsonb.sql index 669ec1b6..5afeb598 100644 --- a/crates/squawk_parser/tests/data/regression_suite/jsonb.sql +++ b/crates/squawk_parser/tests/data/regression_suite/jsonb.sql @@ -1,9 +1,11 @@ -- directory paths are passed to us in environment variables +-- \getenv abs_srcdir PG_ABS_SRCDIR CREATE TABLE testjsonb ( j jsonb ); +-- \set filename :abs_srcdir '/data/jsonb.data' COPY testjsonb FROM 'filename'; -- Strings. @@ -400,6 +402,12 @@ SELECT jsonb_build_object('{1,2,3}'::int[], 3); SELECT jsonb_object_agg(1, NULL::jsonb); SELECT jsonb_object_agg(NULL, '{"a":1}'); +SELECT jsonb_object_agg_unique(i, null) OVER (ORDER BY i) + FROM generate_series(1, 10) g(i); + +SELECT jsonb_object_agg_unique_strict(i, null) OVER (ORDER BY i) + FROM generate_series(1, 10) g(i); + CREATE TEMP TABLE foo (serial_num int, name text, type text); INSERT INTO foo VALUES (847001,'t15','GE1043'); INSERT INTO foo VALUES (847002,'t16','GE1043'); @@ -1252,6 +1260,7 @@ select jsonb_set('{"a": {"b": [1, 2, 3]}}', '{a, b, NULL}', '"new_value"'); -- jsonb_set_lax +-- \pset null NULL -- pass though non nulls to jsonb_set select jsonb_set_lax('{"a":1,"b":2}','{b}','5') ; @@ -1268,6 +1277,7 @@ select jsonb_set_lax('{"a":1,"b":2}', '{b}', null, null_value_treatment => 'retu select jsonb_set_lax('{"a":1,"b":2}', '{b}', null, null_value_treatment => 'delete_key') as delete_key; select jsonb_set_lax('{"a":1,"b":2}', '{b}', null, null_value_treatment => 'use_json_null') as use_json_null; +-- \pset null '' -- jsonb_insert select jsonb_insert('{"a": [0,1,2]}', '{a, 1}', '"new_value"'); @@ -1492,7 +1502,9 @@ insert into test_jsonb_subscript select length(id), test_json[id] from test_jsonb_subscript; update test_jsonb_subscript set test_json[id] = '"baz"'; select length(id), test_json[id] from test_jsonb_subscript; +-- \x table test_jsonb_subscript; +-- \x -- jsonb to tsvector select to_tsvector('{"a": "aaa bbb ddd ccc", "b": ["eee fff ggg"], "c": {"d": "hhh iii"}}'::jsonb); diff --git a/crates/squawk_parser/tests/data/regression_suite/jsonpath_encoding.sql b/crates/squawk_parser/tests/data/regression_suite/jsonpath_encoding.sql index f00a0ce8..d64ea9b7 100644 --- a/crates/squawk_parser/tests/data/regression_suite/jsonpath_encoding.sql +++ b/crates/squawk_parser/tests/data/regression_suite/jsonpath_encoding.sql @@ -5,7 +5,10 @@ -- We provide expected-results files for UTF8 (jsonpath_encoding.out) -- and for SQL_ASCII (jsonpath_encoding_1.out). Skip otherwise. 
SELECT getdatabaseencoding() NOT IN ('UTF8', 'SQL_ASCII') - AS skip_test ; + AS skip_test /* \gset */; +-- \if :skip_test +-- \quit +-- \endif SELECT getdatabaseencoding(); -- just to label the results files diff --git a/crates/squawk_parser/tests/data/regression_suite/largeobject.sql b/crates/squawk_parser/tests/data/regression_suite/largeobject.sql index b2df6bc1..52882c49 100644 --- a/crates/squawk_parser/tests/data/regression_suite/largeobject.sql +++ b/crates/squawk_parser/tests/data/regression_suite/largeobject.sql @@ -3,6 +3,8 @@ -- -- directory paths are passed to us in environment variables +-- \getenv abs_srcdir PG_ABS_SRCDIR +-- \getenv abs_builddir PG_ABS_BUILDDIR -- ensure consistent test output regardless of the default bytea format SET bytea_output TO escape; @@ -21,6 +23,10 @@ COMMENT ON LARGE OBJECT 42 IS 'the ultimate answer'; RESET SESSION AUTHORIZATION; -- Test psql's \lo_list et al (we assume no other LOs exist yet) +-- \lo_list +-- \lo_list+ +-- \lo_unlink 42 +-- \dl -- Load a file CREATE TABLE lotest_stash_values (loid oid, fd integer); @@ -85,10 +91,11 @@ END; -- Note: we intentionally don't remove the object created here; -- it's left behind to help test pg_dump. -SELECT lo_from_bytea(0, lo_get(loid)) AS newloid FROM lotest_stash_values; +SELECT lo_from_bytea(0, lo_get(loid)) AS newloid FROM lotest_stash_values +/* \gset */; -- Add a comment to it, as well, for pg_dump/pg_upgrade testing. -COMMENT ON LARGE OBJECT 1235 IS 'I Wandered Lonely as a Cloud'; +COMMENT ON LARGE OBJECT 10101 IS 'I Wandered Lonely as a Cloud'; -- Read out a portion BEGIN; @@ -125,6 +132,12 @@ BEGIN; SELECT lo_open(loid, x'40000'::int) from lotest_stash_values; ABORT; +-- \set filename :abs_builddir '/results/invalid/path' +-- \set dobody 'DECLARE loid oid; BEGIN ' +-- \set dobody :dobody 'SELECT tbl.loid INTO loid FROM lotest_stash_values tbl; ' +-- \set dobody :dobody 'PERFORM lo_export(loid, ' :'filename' '); ' +-- \set dobody :dobody 'EXCEPTION WHEN UNDEFINED_FILE THEN ' +-- \set dobody :dobody 'RAISE NOTICE ''could not open file, as expected''; END' DO 'dobody'; -- Test truncation. @@ -175,6 +188,7 @@ SELECT lo_unlink(loid) from lotest_stash_values; TRUNCATE lotest_stash_values; +-- \set filename :abs_srcdir '/data/tenk.data' INSERT INTO lotest_stash_values (loid) SELECT lo_import('filename'); BEGIN; @@ -204,45 +218,58 @@ SELECT loread(fd, 36) FROM lotest_stash_values; SELECT lo_close(fd) FROM lotest_stash_values; END; +-- \set filename :abs_builddir '/results/lotest.txt' SELECT lo_export(loid, 'filename') FROM lotest_stash_values; +-- \lo_import :filename +-- \set newloid :LASTOID -- just make sure \lo_export does not barf +-- \set filename :abs_builddir '/results/lotest2.txt' +-- \lo_export 10101 :filename -- This is a hack to test that export/import are reversible -- This uses knowledge about the inner workings of large object mechanism -- which should not be used outside it. 
This makes it a HACK SELECT pageno, data FROM pg_largeobject WHERE loid = (SELECT loid from lotest_stash_values) EXCEPT -SELECT pageno, data FROM pg_largeobject WHERE loid = 'newloid'; +SELECT pageno, data FROM pg_largeobject WHERE loid = 10101; SELECT lo_unlink(loid) FROM lotest_stash_values; TRUNCATE lotest_stash_values; +-- \lo_unlink 10101 +-- \set filename :abs_builddir '/results/lotest.txt' +-- \lo_import :filename +-- \set newloid_1 :LASTOID -SELECT lo_from_bytea(0, lo_get('newloid_1')) AS newloid_2; +SELECT lo_from_bytea(0, lo_get(10101_1)) AS newloid_2 +/* \gset */; -SELECT fipshash(lo_get('newloid_1')) = fipshash(lo_get('newloid_2')); +SELECT fipshash(lo_get(10101_1)) = fipshash(lo_get(10101_2)); -SELECT lo_get('newloid_1', 0, 20); -SELECT lo_get('newloid_1', 10, 20); -SELECT lo_put('newloid_1', 5, decode('afafafaf', 'hex')); -SELECT lo_get('newloid_1', 0, 20); +SELECT lo_get(10101_1, 0, 20); +SELECT lo_get(10101_1, 10, 20); +SELECT lo_put(10101_1, 5, decode('afafafaf', 'hex')); +SELECT lo_get(10101_1, 0, 20); -SELECT lo_put('newloid_1', 4294967310, 'foo'); -SELECT lo_get('newloid_1'); -SELECT lo_get('newloid_1', 4294967294, 100); +SELECT lo_put(10101_1, 4294967310, 'foo'); +SELECT lo_get(10101_1); +SELECT lo_get(10101_1, 4294967294, 100); +-- \lo_unlink 10101_1 +-- \lo_unlink 10101_2 -- This object is left in the database for pg_dump test purposes -SELECT lo_from_bytea(0, E'\\xdeadbeef') AS newloid; +SELECT lo_from_bytea(0, E'\\xdeadbeef') AS newloid +/* \gset */; SET bytea_output TO hex; -SELECT lo_get(1235); +SELECT lo_get(10101); -- Create one more object that we leave behind for testing pg_dump/pg_upgrade; -- this one intentionally has an OID in the system range diff --git a/crates/squawk_parser/tests/data/regression_suite/limit.sql b/crates/squawk_parser/tests/data/regression_suite/limit.sql index d59ce643..33f95442 100644 --- a/crates/squawk_parser/tests/data/regression_suite/limit.sql +++ b/crates/squawk_parser/tests/data/regression_suite/limit.sql @@ -186,14 +186,19 @@ SELECT ''::text AS two, unique1, unique2, stringu1 -- test ruleutils CREATE VIEW limit_thousand_v_1 AS SELECT thousand FROM onek WHERE thousand < 995 ORDER BY thousand FETCH FIRST 5 ROWS WITH TIES OFFSET 10; +-- \d+ limit_thousand_v_1 CREATE VIEW limit_thousand_v_2 AS SELECT thousand FROM onek WHERE thousand < 995 ORDER BY thousand OFFSET 10 FETCH FIRST 5 ROWS ONLY; +-- \d+ limit_thousand_v_2 CREATE VIEW limit_thousand_v_3 AS SELECT thousand FROM onek WHERE thousand < 995 ORDER BY thousand FETCH FIRST NULL ROWS WITH TIES; -- fails CREATE VIEW limit_thousand_v_3 AS SELECT thousand FROM onek WHERE thousand < 995 ORDER BY thousand FETCH FIRST (NULL+1) ROWS WITH TIES; +-- \d+ limit_thousand_v_3 CREATE VIEW limit_thousand_v_4 AS SELECT thousand FROM onek WHERE thousand < 995 ORDER BY thousand FETCH FIRST (5::bigint) ROWS WITH TIES; +-- \d+ limit_thousand_v_4 CREATE VIEW limit_thousand_v_5 AS SELECT thousand FROM onek WHERE thousand < 995 ORDER BY thousand FETCH FIRST NULL ROWS ONLY; +-- \d+ limit_thousand_v_5 -- leave these views diff --git a/crates/squawk_parser/tests/data/regression_suite/lock.sql b/crates/squawk_parser/tests/data/regression_suite/lock.sql index 7841e0ff..747f74f6 100644 --- a/crates/squawk_parser/tests/data/regression_suite/lock.sql +++ b/crates/squawk_parser/tests/data/regression_suite/lock.sql @@ -3,7 +3,10 @@ -- -- directory paths and dlsuffix are passed to us in environment variables +-- \getenv libdir PG_LIBDIR +-- \getenv dlsuffix PG_DLSUFFIX +-- \set regresslib :libdir '/regress' 
:dlsuffix -- Setup CREATE SCHEMA lock_schema1; diff --git a/crates/squawk_parser/tests/data/regression_suite/matview.sql b/crates/squawk_parser/tests/data/regression_suite/matview.sql index 6386542d..8d6d749b 100644 --- a/crates/squawk_parser/tests/data/regression_suite/matview.sql +++ b/crates/squawk_parser/tests/data/regression_suite/matview.sql @@ -40,11 +40,18 @@ CREATE MATERIALIZED VIEW mvtest_bb AS SELECT * FROM mvtest_tvvmv; CREATE INDEX mvtest_aa ON mvtest_bb (grandtot); -- check that plans seem reasonable +-- \d+ mvtest_tvm +-- \d+ mvtest_tvm +-- \d+ mvtest_tvvm +-- \d+ mvtest_bb -- test schema behavior CREATE SCHEMA mvtest_mvschema; ALTER MATERIALIZED VIEW mvtest_tvm SET SCHEMA mvtest_mvschema; +-- \d+ mvtest_tvm +-- \d+ mvtest_tvmm SET search_path = mvtest_mvschema, public; +-- \d+ mvtest_tvm -- modify the underlying table data INSERT INTO mvtest_t VALUES (6, 'z', 13); @@ -91,6 +98,10 @@ REFRESH MATERIALIZED VIEW CONCURRENTLY mvtest_tvmm WITH NO DATA; -- no tuple locks on materialized views SELECT * FROM mvtest_tvvm FOR SHARE; +-- we don't support temp materialized views, so disallow this case: +CREATE TEMP TABLE mvtest_temp_t (id int NOT NULL, type text NOT NULL, amt numeric NOT NULL); +CREATE MATERIALIZED VIEW mvtest_temp_tm AS SELECT * FROM mvtest_temp_t; + -- test join of mv and view SELECT type, m.totamt AS mtot, v.totamt AS vtot FROM mvtest_tm m LEFT JOIN mvtest_tv v USING (type) ORDER BY type; @@ -107,7 +118,9 @@ ROLLBACK; -- some additional tests not using base tables CREATE VIEW mvtest_vt1 AS SELECT 1 moo; CREATE VIEW mvtest_vt2 AS SELECT moo, 2*moo FROM mvtest_vt1 UNION ALL SELECT moo, 3*moo FROM mvtest_vt1; +-- \d+ mvtest_vt2 CREATE MATERIALIZED VIEW mv_test2 AS SELECT moo, 2*moo FROM mvtest_vt2 UNION ALL SELECT moo, 3*moo FROM mvtest_vt2; +-- \d+ mv_test2 CREATE MATERIALIZED VIEW mv_test3 AS SELECT * FROM mv_test2 WHERE moo = 12345; SELECT relispopulated FROM pg_class WHERE oid = 'mv_test3'::regclass; @@ -181,6 +194,7 @@ DROP TABLE mvtest_v CASCADE; -- so that we don't end up with unknown-type columns. CREATE MATERIALIZED VIEW mv_unspecified_types AS SELECT 42 as i, 42.5 as num, 'foo' as u, 'foo'::unknown as u2, null as n; +-- \d+ mv_unspecified_types SELECT * FROM mv_unspecified_types; DROP MATERIALIZED VIEW mv_unspecified_types; diff --git a/crates/squawk_parser/tests/data/regression_suite/memoize.sql b/crates/squawk_parser/tests/data/regression_suite/memoize.sql index c0d47fa8..8d1cdd69 100644 --- a/crates/squawk_parser/tests/data/regression_suite/memoize.sql +++ b/crates/squawk_parser/tests/data/regression_suite/memoize.sql @@ -26,6 +26,7 @@ begin ln := regexp_replace(ln, 'Heap Fetches: \d+', 'Heap Fetches: N'); ln := regexp_replace(ln, 'loops=\d+', 'loops=N'); ln := regexp_replace(ln, 'Index Searches: \d+', 'Index Searches: N'); + ln := regexp_replace(ln, 'Memory: \d+kB', 'Memory: NkB'); return next ln; end loop; end; @@ -244,3 +245,29 @@ RESET max_parallel_workers_per_gather; RESET parallel_tuple_cost; RESET parallel_setup_cost; RESET min_parallel_table_scan_size; + +-- Ensure memoize works for ANTI joins +CREATE TABLE tab_anti (a int, b boolean); +INSERT INTO tab_anti SELECT i%3, false FROM generate_series(1,100)i; +ANALYZE tab_anti; + +-- Ensure we get a Memoize plan for ANTI join +SELECT explain_memoize(' +SELECT COUNT(*) FROM tab_anti t1 LEFT JOIN +LATERAL (SELECT DISTINCT ON (a) a, b, t1.a AS x FROM tab_anti t2) t2 +ON t1.a+1 = t2.a +WHERE t2.a IS NULL;', false); + +-- And check we get the expected results. 
+SELECT COUNT(*) FROM tab_anti t1 LEFT JOIN +LATERAL (SELECT DISTINCT ON (a) a, b, t1.a AS x FROM tab_anti t2) t2 +ON t1.a+1 = t2.a +WHERE t2.a IS NULL; + +-- Ensure we do not add memoize node for SEMI join +EXPLAIN (COSTS OFF) +SELECT * FROM tab_anti t1 WHERE t1.a IN + (SELECT a FROM tab_anti t2 WHERE t2.b IN + (SELECT t1.b FROM tab_anti t3 WHERE t2.a > 1 OFFSET 0)); + +DROP TABLE tab_anti; diff --git a/crates/squawk_parser/tests/data/regression_suite/merge.sql b/crates/squawk_parser/tests/data/regression_suite/merge.sql index 4ef95aed..ec35bdb4 100644 --- a/crates/squawk_parser/tests/data/regression_suite/merge.sql +++ b/crates/squawk_parser/tests/data/regression_suite/merge.sql @@ -59,37 +59,37 @@ USING source AS s ON t.tid = s.sid WHEN NOT MATCHED BY SOURCE THEN INSERT DEFAULT VALUES; --- incorrectly specifying INTO target +-- -- incorrectly specifying INTO target -- MERGE INTO target t -- USING source AS s -- ON t.tid = s.sid -- WHEN NOT MATCHED THEN -- INSERT INTO target DEFAULT VALUES; --- Multiple VALUES clause +-- -- Multiple VALUES clause -- MERGE INTO target t -- USING source AS s -- ON t.tid = s.sid -- WHEN NOT MATCHED THEN -- INSERT VALUES (1,1), (2,2); --- SELECT query for INSERT +-- -- SELECT query for INSERT -- MERGE INTO target t -- USING source AS s -- ON t.tid = s.sid -- WHEN NOT MATCHED THEN -- INSERT SELECT (1, 1); -- NOT MATCHED/UPDATE --- MERGE INTO target t --- USING source AS s --- ON t.tid = s.sid --- WHEN NOT MATCHED THEN --- UPDATE SET balance = 0; +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED THEN + UPDATE SET balance = 0; -- NOT MATCHED BY TARGET/UPDATE --- MERGE INTO target t --- USING source AS s --- ON t.tid = s.sid --- WHEN NOT MATCHED BY TARGET THEN --- UPDATE SET balance = 0; --- UPDATE tablename +MERGE INTO target t +USING source AS s +ON t.tid = s.sid +WHEN NOT MATCHED BY TARGET THEN + UPDATE SET balance = 0; +-- -- UPDATE tablename -- MERGE INTO target t -- USING source AS s -- ON t.tid = s.sid @@ -1722,6 +1722,55 @@ WHEN MATCHED THEN DELETE; SELECT * FROM new_measurement ORDER BY city_id, logdate; +-- MERGE into inheritance root table +DROP TRIGGER insert_measurement_trigger ON measurement; +ALTER TABLE measurement ADD CONSTRAINT mcheck CHECK (city_id = 0) NO INHERIT; + +EXPLAIN (COSTS OFF) +MERGE INTO measurement m + USING (VALUES (1, '01-17-2007'::date)) nm(city_id, logdate) ON + (m.city_id = nm.city_id and m.logdate=nm.logdate) +WHEN NOT MATCHED THEN INSERT + (city_id, logdate, peaktemp, unitsales) + VALUES (city_id - 1, logdate, 25, 100); + +BEGIN; +MERGE INTO measurement m + USING (VALUES (1, '01-17-2007'::date)) nm(city_id, logdate) ON + (m.city_id = nm.city_id and m.logdate=nm.logdate) +WHEN NOT MATCHED THEN INSERT + (city_id, logdate, peaktemp, unitsales) + VALUES (city_id - 1, logdate, 25, 100); +SELECT * FROM ONLY measurement ORDER BY city_id, logdate; +ROLLBACK; + +ALTER TABLE measurement ENABLE ROW LEVEL SECURITY; +ALTER TABLE measurement FORCE ROW LEVEL SECURITY; +CREATE POLICY measurement_p ON measurement USING (peaktemp IS NOT NULL); + +MERGE INTO measurement m + USING (VALUES (1, '01-17-2007'::date)) nm(city_id, logdate) ON + (m.city_id = nm.city_id and m.logdate=nm.logdate) +WHEN NOT MATCHED THEN INSERT + (city_id, logdate, peaktemp, unitsales) + VALUES (city_id - 1, logdate, NULL, 100); -- should fail + +MERGE INTO measurement m + USING (VALUES (1, '01-17-2007'::date)) nm(city_id, logdate) ON + (m.city_id = nm.city_id and m.logdate=nm.logdate) +WHEN NOT MATCHED THEN INSERT + (city_id, logdate, peaktemp, 
unitsales) + VALUES (city_id - 1, logdate, 25, 100); -- ok +SELECT * FROM ONLY measurement ORDER BY city_id, logdate; + +MERGE INTO measurement m + USING (VALUES (1, '01-18-2007'::date)) nm(city_id, logdate) ON + (m.city_id = nm.city_id and m.logdate=nm.logdate) +WHEN NOT MATCHED THEN INSERT + (city_id, logdate, peaktemp, unitsales) + VALUES (city_id - 1, logdate, 25, 200) +RETURNING merge_action(), m.*; + DROP TABLE measurement, new_measurement CASCADE; DROP FUNCTION measurement_insert_trigger(); diff --git a/crates/squawk_parser/tests/data/regression_suite/misc.sql b/crates/squawk_parser/tests/data/regression_suite/misc.sql index e112fbba..133bc984 100644 --- a/crates/squawk_parser/tests/data/regression_suite/misc.sql +++ b/crates/squawk_parser/tests/data/regression_suite/misc.sql @@ -3,7 +3,12 @@ -- -- directory paths and dlsuffix are passed to us in environment variables +-- \getenv abs_srcdir PG_ABS_SRCDIR +-- \getenv abs_builddir PG_ABS_BUILDDIR +-- \getenv libdir PG_LIBDIR +-- \getenv dlsuffix PG_DLSUFFIX +-- \set regresslib :libdir '/regress' :dlsuffix CREATE FUNCTION overpaid(emp) RETURNS bool @@ -68,6 +73,7 @@ DROP TABLE tmp; -- -- copy -- +-- \set filename :abs_builddir '/results/onek.data' COPY onek TO 'filename'; CREATE TEMP TABLE onek_copy (LIKE onek); @@ -78,6 +84,7 @@ SELECT * FROM onek EXCEPT ALL SELECT * FROM onek_copy; SELECT * FROM onek_copy EXCEPT ALL SELECT * FROM onek; +-- \set filename :abs_builddir '/results/stud_emp.data' COPY BINARY stud_emp TO 'filename'; CREATE TEMP TABLE stud_emp_copy (LIKE stud_emp); diff --git a/crates/squawk_parser/tests/data/regression_suite/misc_functions.sql b/crates/squawk_parser/tests/data/regression_suite/misc_functions.sql index 760e8515..93be4c7a 100644 --- a/crates/squawk_parser/tests/data/regression_suite/misc_functions.sql +++ b/crates/squawk_parser/tests/data/regression_suite/misc_functions.sql @@ -1,5 +1,8 @@ -- directory paths and dlsuffix are passed to us in environment variables +-- \getenv libdir PG_LIBDIR +-- \getenv dlsuffix PG_DLSUFFIX +-- \set regresslib :libdir '/regress' :dlsuffix -- Function to assist with verifying EXPLAIN which includes costs. A series -- of bool flags allows control over which portions are masked out @@ -74,6 +77,17 @@ SELECT num_nulls(VARIADIC '{}'::int[]); SELECT num_nonnulls(); SELECT num_nulls(); +-- +-- error_on_null() +-- + +SELECT error_on_null(1); +SELECT error_on_null(NULL::int); +SELECT error_on_null(NULL::int[]); +SELECT error_on_null('{1,2,NULL,3}'::int[]); +SELECT error_on_null(ROW(1,NULL::int)); +SELECT error_on_null(ROW(NULL,NULL)); + -- -- canonicalize_path() -- @@ -147,7 +161,8 @@ DROP ROLE regress_log_memory; -- directly, but we can at least verify that the code doesn't fail. 
-- select setting as segsize -from pg_settings where name = 'wal_segment_size'; +from pg_settings where name = 'wal_segment_size' +/* \gset */; select count(*) > 0 as ok from pg_ls_waldir(); -- Test ProjectSet as well as FunctionScan @@ -345,6 +360,40 @@ SELECT explain_mask_costs($$ SELECT * FROM generate_series(25.0, 2.0, 0.0) g(s);$$, false, true, false, true); +-- +-- Test SupportRequestInlineInFrom request +-- + +CREATE FUNCTION test_inline_in_from_support_func(internal) + RETURNS internal + AS 'regresslib', 'test_inline_in_from_support_func' + LANGUAGE C STRICT; + +CREATE FUNCTION foo_from_bar(colname TEXT, tablename TEXT, filter TEXT) +RETURNS SETOF TEXT +LANGUAGE plpgsql +AS $function$ +DECLARE + sql TEXT; +BEGIN + sql := format('SELECT %I::text FROM %I', colname, tablename); + IF filter IS NOT NULL THEN + sql := CONCAT(sql, format(' WHERE %I::text = $1', colname)); + END IF; + RETURN QUERY EXECUTE sql USING filter; +END; +$function$ STABLE; + +ALTER FUNCTION foo_from_bar(TEXT, TEXT, TEXT) + SUPPORT test_inline_in_from_support_func; + +SELECT * FROM foo_from_bar('f1', 'text_tbl', NULL); +SELECT * FROM foo_from_bar('f1', 'text_tbl', 'doh!'); +EXPLAIN (COSTS OFF) SELECT * FROM foo_from_bar('f1', 'text_tbl', NULL); +EXPLAIN (COSTS OFF) SELECT * FROM foo_from_bar('f1', 'text_tbl', 'doh!'); + +DROP FUNCTION foo_from_bar; + -- Test functions for control data SELECT count(*) > 0 AS ok FROM pg_control_checkpoint(); SELECT count(*) > 0 AS ok FROM pg_control_init(); @@ -360,7 +409,8 @@ SELECT segment_number > 0 AS ok_segment_number, timeline_id FROM pg_split_walfile_name('ffffffFF00000001000000af'); SELECT setting::int8 AS segment_size FROM pg_settings -WHERE name = 'wal_segment_size'; +WHERE name = 'wal_segment_size' +/* \gset */; SELECT segment_number, file_offset FROM pg_walfile_name_offset('0/0'::pg_lsn + 'segment_size'), pg_split_walfile_name(file_name); @@ -387,16 +437,17 @@ CREATE TABLE test_chunk_id (a TEXT, b TEXT STORAGE EXTERNAL); INSERT INTO test_chunk_id VALUES ('x', repeat('x', 8192)); SELECT t.relname AS toastrel FROM pg_class c LEFT JOIN pg_class t ON c.reltoastrelid = t.oid - WHERE c.relname = 'test_chunk_id'; + WHERE c.relname = 'test_chunk_id' +/* \gset */; SELECT pg_column_toast_chunk_id(a) IS NULL, - pg_column_toast_chunk_id(b) IN (SELECT chunk_id FROM pg_toast."toastrel") + pg_column_toast_chunk_id(b) IN (SELECT chunk_id FROM pg_toast.toastrel) FROM test_chunk_id; DROP TABLE test_chunk_id; DROP FUNCTION explain_mask_costs(text, bool, bool, bool, bool); --- test stratnum support functions -SELECT gist_stratnum_common(7); -SELECT gist_stratnum_common(3); +-- test stratnum translation support functions +SELECT gist_translate_cmptype_common(7); +SELECT gist_translate_cmptype_common(3); -- relpath tests diff --git a/crates/squawk_parser/tests/data/regression_suite/multirangetypes.sql b/crates/squawk_parser/tests/data/regression_suite/multirangetypes.sql index de4f46a3..412e7a83 100644 --- a/crates/squawk_parser/tests/data/regression_suite/multirangetypes.sql +++ b/crates/squawk_parser/tests/data/regression_suite/multirangetypes.sql @@ -414,6 +414,28 @@ SELECT nummultirange(numrange(1,3), numrange(4,5)) - nummultirange(numrange(2,9) SELECT nummultirange(numrange(1,2), numrange(4,5)) - nummultirange(numrange(8,9)); SELECT nummultirange(numrange(1,2), numrange(4,5)) - nummultirange(numrange(-2,0), numrange(8,9)); +-- multirange_minus_multi +SELECT multirange_minus_multi(nummultirange(), nummultirange()); +SELECT multirange_minus_multi(nummultirange(), 
nummultirange(numrange(1,2))); +SELECT multirange_minus_multi(nummultirange(numrange(1,2)), nummultirange()); +SELECT multirange_minus_multi(nummultirange(numrange(1,2), numrange(3,4)), nummultirange()); +SELECT multirange_minus_multi(nummultirange(numrange(1,2)), nummultirange(numrange(1,2))); +SELECT multirange_minus_multi(nummultirange(numrange(1,2)), nummultirange(numrange(2,4))); +SELECT multirange_minus_multi(nummultirange(numrange(1,2)), nummultirange(numrange(3,4))); +SELECT multirange_minus_multi(nummultirange(numrange(1,4)), nummultirange(numrange(1,2))); +SELECT multirange_minus_multi(nummultirange(numrange(1,4)), nummultirange(numrange(2,3))); +SELECT multirange_minus_multi(nummultirange(numrange(1,4)), nummultirange(numrange(0,8))); +SELECT multirange_minus_multi(nummultirange(numrange(1,4)), nummultirange(numrange(0,2))); +SELECT multirange_minus_multi(nummultirange(numrange(1,8)), nummultirange(numrange(0,2), numrange(3,4))); +SELECT multirange_minus_multi(nummultirange(numrange(1,8)), nummultirange(numrange(2,3), numrange(5,null))); +SELECT multirange_minus_multi(nummultirange(numrange(1,2), numrange(4,5)), nummultirange(numrange(-2,0))); +SELECT multirange_minus_multi(nummultirange(numrange(1,2), numrange(4,5)), nummultirange(numrange(2,4))); +SELECT multirange_minus_multi(nummultirange(numrange(1,2), numrange(4,5)), nummultirange(numrange(3,5))); +SELECT multirange_minus_multi(nummultirange(numrange(1,2), numrange(4,5)), nummultirange(numrange(0,9))); +SELECT multirange_minus_multi(nummultirange(numrange(1,3), numrange(4,5)), nummultirange(numrange(2,9))); +SELECT multirange_minus_multi(nummultirange(numrange(1,2), numrange(4,5)), nummultirange(numrange(8,9))); +SELECT multirange_minus_multi(nummultirange(numrange(1,2), numrange(4,5)), nummultirange(numrange(-2,0), numrange(8,9))); + -- intersection SELECT nummultirange() * nummultirange(); SELECT nummultirange() * nummultirange(numrange(1,2)); @@ -711,6 +733,7 @@ alter type textrange1 owner to regress_multirange_owner; set role regress_multirange_owner; revoke usage on type multitextrange1 from public; -- fail revoke usage on type textrange1 from public; +-- \dT+ *textrange1* create temp table test1(f1 multitextrange1[]); revoke usage on type textrange1 from regress_multirange_owner; create temp table test2(f1 multitextrange1[]); -- fail diff --git a/crates/squawk_parser/tests/data/regression_suite/mvcc.sql b/crates/squawk_parser/tests/data/regression_suite/mvcc.sql index 66289c69..a5e2e3a7 100644 --- a/crates/squawk_parser/tests/data/regression_suite/mvcc.sql +++ b/crates/squawk_parser/tests/data/regression_suite/mvcc.sql @@ -19,7 +19,7 @@ CREATE INDEX clean_aborted_self_key ON clean_aborted_self(key); INSERT INTO clean_aborted_self (key, data) VALUES (-1, 'just to allocate metapage'); -- save index size from before the changes, for comparison -SELECT pg_relation_size('clean_aborted_self_key') AS clean_aborted_self_key_before ; +SELECT pg_relation_size('clean_aborted_self_key') AS clean_aborted_self_key_before /* \gset */; DO $$ BEGIN diff --git a/crates/squawk_parser/tests/data/regression_suite/namespace.sql b/crates/squawk_parser/tests/data/regression_suite/namespace.sql index 0c6da371..d6cb370b 100644 --- a/crates/squawk_parser/tests/data/regression_suite/namespace.sql +++ b/crates/squawk_parser/tests/data/regression_suite/namespace.sql @@ -56,10 +56,10 @@ SELECT COUNT(*) FROM pg_class WHERE relnamespace = CREATE SCHEMA test_ns_schema_renamed; -- fail, already exists CREATE SCHEMA IF NOT EXISTS 
test_ns_schema_renamed; -- ok with notice -- CREATE SCHEMA IF NOT EXISTS test_ns_schema_renamed -- fail, disallowed --- CREATE TABLE abc ( --- a serial, --- b int UNIQUE --- ); + CREATE TABLE abc ( + a serial, + b int UNIQUE + ); DROP SCHEMA test_ns_schema_renamed CASCADE; diff --git a/crates/squawk_parser/tests/data/regression_suite/nls.sql b/crates/squawk_parser/tests/data/regression_suite/nls.sql new file mode 100644 index 00000000..cf603d62 --- /dev/null +++ b/crates/squawk_parser/tests/data/regression_suite/nls.sql @@ -0,0 +1,38 @@ +-- directory paths and dlsuffix are passed to us in environment variables +-- \getenv libdir PG_LIBDIR +-- \getenv dlsuffix PG_DLSUFFIX + +-- \set regresslib :libdir '/regress' :dlsuffix + +CREATE FUNCTION test_translation() + RETURNS void + AS 'regresslib' + LANGUAGE C; + +-- There's less standardization in locale name spellings than one could wish. +-- While some platforms insist on having a codeset name in lc_messages, +-- fortunately it seems that it need not match the actual database encoding. +-- However, if no es_ES locale is installed at all, this'll fail. +SET lc_messages = 'C'; + +do $$ +declare locale text; ok bool; +begin + for locale in values('es_ES'), ('es_ES.UTF-8'), ('es_ES.utf8') + loop + ok = true; + begin + execute format('set lc_messages = %L', locale); + exception when invalid_parameter_value then + ok = false; + end; + exit when ok; + end loop; + -- Don't clutter the expected results with this info, just log it + raise log 'NLS regression test: lc_messages = %', + current_setting('lc_messages'); +end $$; + +SELECT test_translation(); + +RESET lc_messages; diff --git a/crates/squawk_parser/tests/data/regression_suite/numa.sql b/crates/squawk_parser/tests/data/regression_suite/numa.sql index 9bc75cdc..e7daf647 100644 --- a/crates/squawk_parser/tests/data/regression_suite/numa.sql +++ b/crates/squawk_parser/tests/data/regression_suite/numa.sql @@ -1,6 +1,10 @@ -SELECT NOT(pg_numa_available()) AS skip_test ; +SELECT NOT(pg_numa_available()) AS skip_test /* \gset */; +-- \if :skip_test SELECT COUNT(*) = 0 AS ok FROM pg_shmem_allocations_numa; +-- \quit +-- \endif -- switch to superuser +-- \c - SELECT COUNT(*) >= 0 AS ok FROM pg_shmem_allocations_numa; diff --git a/crates/squawk_parser/tests/data/regression_suite/numeric.sql b/crates/squawk_parser/tests/data/regression_suite/numeric.sql index 8e5514ae..495ce763 100644 --- a/crates/squawk_parser/tests/data/regression_suite/numeric.sql +++ b/crates/squawk_parser/tests/data/regression_suite/numeric.sql @@ -869,6 +869,8 @@ SELECT width_bucket(5.0::float8, 3.0::float8, 4.0::float8, 0); SELECT width_bucket(5.0::float8, 3.0::float8, 4.0::float8, -5); SELECT width_bucket(3.5::float8, 3.0::float8, 3.0::float8, 888); SELECT width_bucket('NaN', 3.0, 4.0, 888); +SELECT width_bucket('NaN'::float8, 3.0::float8, 4.0::float8, 888); +SELECT width_bucket(0, 'NaN', 4.0, 888); SELECT width_bucket(0::float8, 'NaN', 4.0::float8, 888); SELECT width_bucket(2.0, 3.0, '-inf', 888); SELECT width_bucket(0::float8, '-inf', 4.0::float8, 888); @@ -876,6 +878,27 @@ SELECT width_bucket(0::float8, '-inf', 4.0::float8, 888); -- normal operation CREATE TABLE width_bucket_test (operand_num numeric, operand_f8 float8); +COPY width_bucket_test (operand_num) FROM stdin; +-- -5.2 +-- -0.0000000001 +-- 0.000000000001 +-- 1 +-- 1.99999999999999 +-- 2 +-- 2.00000000000001 +-- 3 +-- 4 +-- 4.5 +-- 5 +-- 5.5 +-- 6 +-- 7 +-- 8 +-- 9 +-- 9.99999999999999 +-- 10 +-- 10.0000000000001 +-- \. 
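-- A minimal worked illustration of the function under test (an editorial
-- sketch, not part of the upstream data set): width_bucket(operand, low,
-- high, nbuckets) divides [low, high) into nbuckets equal-width buckets;
-- operands below low map to 0, and operands at or above high map to
-- nbuckets + 1.
SELECT width_bucket(5.0, 0.0, 10.0, 5);   -- 3: 5.0 falls in the bucket [4,6)
SELECT width_bucket(-1.0, 0.0, 10.0, 5);  -- 0: below the range
SELECT width_bucket(10.0, 0.0, 10.0, 5);  -- 6: at or above the upper bound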
UPDATE width_bucket_test SET operand_f8 = operand_num::float8; @@ -1183,6 +1206,7 @@ CREATE TABLE num_typemod_test ( thousandths numeric(3, 3), millionths numeric(3, 6) ); +-- \d num_typemod_test -- rounding of valid inputs INSERT INTO num_typemod_test VALUES (123456, 123, 0.123, 0.000123, 0.000000123); diff --git a/crates/squawk_parser/tests/data/regression_suite/object_address.sql b/crates/squawk_parser/tests/data/regression_suite/object_address.sql index eeeedba4..48ef32df 100644 --- a/crates/squawk_parser/tests/data/regression_suite/object_address.sql +++ b/crates/squawk_parser/tests/data/regression_suite/object_address.sql @@ -146,9 +146,11 @@ SELECT pg_get_object_address('subscription', '{one}', '{}'); SELECT pg_get_object_address('subscription', '{one,two}', '{}'); -- Make sure that NULL handling is correct. +-- \pset null 'NULL' -- Temporarily disable fancy output, so as future additions never create -- a large amount of diffs. +-- \a\t -- test successful cases WITH objects (type, name, args) AS (VALUES @@ -292,3 +294,4 @@ FROM objects ORDER BY objects.classid, objects.objid, objects.objsubid; -- restore normal output mode +-- \a\t diff --git a/crates/squawk_parser/tests/data/regression_suite/opr_sanity.sql b/crates/squawk_parser/tests/data/regression_suite/opr_sanity.sql index 0315d8e5..275adc17 100644 --- a/crates/squawk_parser/tests/data/regression_suite/opr_sanity.sql +++ b/crates/squawk_parser/tests/data/regression_suite/opr_sanity.sql @@ -391,6 +391,7 @@ WHERE d.classoid IS NULL AND p1.oid <= 9999; -- leakproof function is. If unsure, don't mark it as such. -- temporarily disable fancy output, so catalog changes create less diff noise +-- \a\t SELECT p1.oid::regprocedure FROM pg_proc p1 JOIN pg_namespace pn @@ -406,6 +407,7 @@ WHERE nspname = 'pg_catalog' AND proleakproof AND pronargs = 0 ORDER BY 1; -- restore normal output mode +-- \a\t -- List of functions used by libpq's fe-lobj.c -- @@ -845,7 +847,7 @@ WHERE aggfnoid = 0 OR aggtransfn = 0 OR (aggkind = 'n' AND aggnumdirectargs > 0) OR aggfinalmodify NOT IN ('r', 's', 'w') OR aggmfinalmodify NOT IN ('r', 's', 'w') OR - aggtranstype = 0 OR aggtransspace < 0 OR aggmtransspace < 0; + aggtranstype = 0 OR aggmtransspace < 0; -- Make sure the matching pg_proc entry is sensible, too. diff --git a/crates/squawk_parser/tests/data/regression_suite/partition_aggregate.sql b/crates/squawk_parser/tests/data/regression_suite/partition_aggregate.sql index ab070fee..7c725e26 100644 --- a/crates/squawk_parser/tests/data/regression_suite/partition_aggregate.sql +++ b/crates/squawk_parser/tests/data/regression_suite/partition_aggregate.sql @@ -14,6 +14,8 @@ SET enable_partitionwise_join TO true; SET max_parallel_workers_per_gather TO 0; -- Disable incremental sort, which can influence selected plans due to fuzz factor. SET enable_incremental_sort TO off; +-- Disable eager aggregation, which can interfere with the generation of partitionwise aggregation. +SET enable_eager_aggregate TO off; -- -- Tests for list partitioned tables. 
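-- A minimal sketch of the plan shape the partitionwise settings above target
-- (pagg_demo and its partitions are hypothetical names used only for
-- illustration, not part of this suite): with enable_partitionwise_aggregate
-- on, a GROUP BY on the partition key can be evaluated separately per
-- partition, below the Append node.
CREATE TABLE pagg_demo (a int, v int) PARTITION BY LIST (a);
CREATE TABLE pagg_demo_0 PARTITION OF pagg_demo FOR VALUES IN (0);
CREATE TABLE pagg_demo_1 PARTITION OF pagg_demo FOR VALUES IN (1);
SET enable_partitionwise_aggregate TO true;
EXPLAIN (COSTS OFF) SELECT a, count(*) FROM pagg_demo GROUP BY a;
DROP TABLE pagg_demo;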
@@ -74,6 +76,11 @@ EXPLAIN (COSTS OFF)
SELECT a FROM pagg_tab WHERE a < 3 GROUP BY a ORDER BY 1;
SELECT a FROM pagg_tab WHERE a < 3 GROUP BY a ORDER BY 1;
+-- Test partitionwise aggregation with ordered append path built from fractional paths
+EXPLAIN (COSTS OFF)
+SELECT count(*) FROM pagg_tab GROUP BY c ORDER BY c LIMIT 1;
+SELECT count(*) FROM pagg_tab GROUP BY c ORDER BY c LIMIT 1;
+
RESET enable_hashagg;
-- ROLLUP, partitionwise aggregation does not apply
diff --git a/crates/squawk_parser/tests/data/regression_suite/partition_info.sql b/crates/squawk_parser/tests/data/regression_suite/partition_info.sql
index b5060bec..2ef65292 100644
--- a/crates/squawk_parser/tests/data/regression_suite/partition_info.sql
+++ b/crates/squawk_parser/tests/data/regression_suite/partition_info.sql
@@ -127,3 +127,11 @@ SELECT pg_partition_root('ptif_li_child');
DROP VIEW ptif_test_view;
DROP MATERIALIZED VIEW ptif_test_matview;
DROP TABLE ptif_li_parent, ptif_li_child;
+
+-- Test selection of arbiter indexes for partitioned tables with
+-- a non-valid index on the parent table
+CREATE TABLE pt (a int PRIMARY KEY) PARTITION BY RANGE (a);
+CREATE TABLE p1 PARTITION OF pt FOR VALUES FROM (1) to (2) PARTITION BY RANGE (a);
+CREATE TABLE p1_1 PARTITION OF p1 FOR VALUES FROM (1) TO (2);
+CREATE UNIQUE INDEX ON ONLY p1 (a);
+INSERT INTO p1 VALUES (1) ON CONFLICT (a) DO NOTHING;
diff --git a/crates/squawk_parser/tests/data/regression_suite/partition_join.sql b/crates/squawk_parser/tests/data/regression_suite/partition_join.sql
index 30f15ee9..d153297a 100644
--- a/crates/squawk_parser/tests/data/regression_suite/partition_join.sql
+++ b/crates/squawk_parser/tests/data/regression_suite/partition_join.sql
@@ -35,9 +35,14 @@ SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.b AND t1.b =
SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.b AND t1.b = 0 ORDER BY t1.a, t2.b;
-- inner join with partially-redundant join clauses
+-- (avoid a mergejoin, because the planner thinks that a non-partitionwise
+-- merge join is the cheapest plan, and we want to test a partitionwise join)
+BEGIN;
+SET LOCAL enable_mergejoin = false;
EXPLAIN (COSTS OFF)
SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.a AND t1.a = t2.b ORDER BY t1.a, t2.b;
SELECT t1.a, t1.c, t2.b, t2.c FROM prt1 t1, prt2 t2 WHERE t1.a = t2.a AND t1.a = t2.b ORDER BY t1.a, t2.b;
+COMMIT;
-- left outer join, 3-way
EXPLAIN (COSTS OFF)
@@ -219,13 +224,14 @@ EXPLAIN (COSTS OFF)
SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1 WHERE t1.b IN (SELECT (t1.a + t1.b)/2 FROM prt1_e t1 WHERE t1.c = 0)) AND t1.b = 0 ORDER BY t1.a;
SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1 WHERE t1.b IN (SELECT (t1.a + t1.b)/2 FROM prt1_e t1 WHERE t1.c = 0)) AND t1.b = 0 ORDER BY t1.a;
--- test merge joins
+-- test merge joins, slightly modifying the query to ensure that we still
+-- get a fully partitionwise join
SET enable_hashjoin TO off;
SET enable_nestloop TO off;
EXPLAIN (COSTS OFF)
-SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1 WHERE t1.b IN (SELECT (t1.a + t1.b)/2 FROM prt1_e t1 WHERE t1.c = 0)) AND t1.b = 0 ORDER BY t1.a;
-SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1 WHERE t1.b IN (SELECT (t1.a + t1.b)/2 FROM prt1_e t1 WHERE t1.c = 0)) AND t1.b = 0 ORDER BY t1.a;
+SELECT t1.* FROM prt1 t1 WHERE t1.a IN (SELECT t1.b FROM prt2 t1 WHERE t1.b IN (SELECT (t1.a + t1.b)/2 FROM prt1_e t1 WHERE t1.c = 0)) ORDER BY t1.a;
+SELECT t1.* FROM prt1 t1 WHERE t1.a IN
(SELECT t1.b FROM prt2 t1 WHERE t1.b IN (SELECT (t1.a + t1.b)/2 FROM prt1_e t1 WHERE t1.c = 0)) ORDER BY t1.a; EXPLAIN (COSTS OFF) SELECT t1.a, t1.c, t2.b, t2.c, t3.a + t3.b, t3.c FROM (prt1 t1 LEFT JOIN prt2 t2 ON t1.a = t2.b) RIGHT JOIN prt1_e t3 ON (t1.a = (t3.a + t3.b)/2) WHERE t3.c = 0 ORDER BY t1.a, t2.b, t3.a + t3.b; @@ -1155,8 +1161,8 @@ ANALYZE plt3_adv; -- merged partition when re-called with plt1_adv_p1 for the second list value -- '0001' of that partition EXPLAIN (COSTS OFF) -SELECT t1.a, t1.c, t2.a, t2.c, t3.a, t3.c FROM (plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.c = t2.c)) FULL JOIN plt3_adv t3 ON (t1.c = t3.c) WHERE coalesce(t1.a, 0) % 5 != 3 AND coalesce(t1.a, 0) % 5 != 4 ORDER BY t1.c, t1.a, t2.a, t3.a; -SELECT t1.a, t1.c, t2.a, t2.c, t3.a, t3.c FROM (plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.c = t2.c)) FULL JOIN plt3_adv t3 ON (t1.c = t3.c) WHERE coalesce(t1.a, 0) % 5 != 3 AND coalesce(t1.a, 0) % 5 != 4 ORDER BY t1.c, t1.a, t2.a, t3.a; +SELECT t1.a, t1.c, t2.a, t2.c, t3.a, t3.c FROM (plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c)) FULL JOIN plt3_adv t3 ON (t1.a = t3.a AND t1.c = t3.c) WHERE coalesce(t1.a, 0) % 5 != 3 AND coalesce(t1.a, 0) % 5 != 4 ORDER BY t1.c, t1.a, t2.a, t3.a; +SELECT t1.a, t1.c, t2.a, t2.c, t3.a, t3.c FROM (plt1_adv t1 LEFT JOIN plt2_adv t2 ON (t1.a = t2.a AND t1.c = t2.c)) FULL JOIN plt3_adv t3 ON (t1.a = t3.a AND t1.c = t3.c) WHERE coalesce(t1.a, 0) % 5 != 3 AND coalesce(t1.a, 0) % 5 != 4 ORDER BY t1.c, t1.a, t2.a, t3.a; DROP TABLE plt1_adv; DROP TABLE plt2_adv; @@ -1216,8 +1222,11 @@ INSERT INTO fract_t (id) (SELECT generate_series(0, 1999)); ANALYZE fract_t; -- verify plan; nested index only scans +-- (avoid merge joins, because the costs of partitionwise and non-partitionwise +-- merge joins tend to be almost equal, and we want this test to be stable) SET max_parallel_workers_per_gather = 0; SET enable_partitionwise_join = on; +SET enable_mergejoin = off; EXPLAIN (COSTS OFF) SELECT x.id, y.id FROM fract_t x LEFT JOIN fract_t y USING (id) ORDER BY x.id ASC LIMIT 10; @@ -1240,6 +1249,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM pht1 p1 JOIN pht1 p2 USING (c) LIMIT 100; -- If almost all the data should be fetched - prefer SeqScan EXPLAIN (COSTS OFF) SELECT * FROM pht1 p1 JOIN pht1 p2 USING (c) LIMIT 1000; +RESET enable_mergejoin; SET max_parallel_workers_per_gather = 1; SET debug_parallel_query = on; -- Partial paths should also be smart enough to employ limits diff --git a/crates/squawk_parser/tests/data/regression_suite/partition_merge.sql b/crates/squawk_parser/tests/data/regression_suite/partition_merge.sql new file mode 100644 index 00000000..50213edf --- /dev/null +++ b/crates/squawk_parser/tests/data/regression_suite/partition_merge.sql @@ -0,0 +1,791 @@ +-- +-- PARTITIONS_MERGE +-- Tests for "ALTER TABLE ... MERGE PARTITIONS ..." 
command +-- + +CREATE SCHEMA partitions_merge_schema; +CREATE SCHEMA partitions_merge_schema2; +SET search_path = partitions_merge_schema, public; + +-- +-- BY RANGE partitioning +-- + +-- +-- Test for error codes +-- +CREATE TABLE sales_range (salesperson_id INT, salesperson_name VARCHAR(30), sales_amount INT, sales_date DATE) PARTITION BY RANGE (sales_date); +CREATE TABLE sales_dec2021 PARTITION OF sales_range FOR VALUES FROM ('2021-12-01') TO ('2021-12-31'); +CREATE TABLE sales_jan2022 PARTITION OF sales_range FOR VALUES FROM ('2022-01-01') TO ('2022-02-01'); +CREATE TABLE sales_feb2022 PARTITION OF sales_range FOR VALUES FROM ('2022-02-01') TO ('2022-03-01'); +CREATE TABLE sales_mar2022 PARTITION OF sales_range FOR VALUES FROM ('2022-03-01') TO ('2022-04-01'); + +CREATE TABLE sales_apr2022 (salesperson_id INT, salesperson_name VARCHAR(30), sales_amount INT, sales_date DATE) PARTITION BY RANGE (sales_date); +CREATE TABLE sales_apr_1 PARTITION OF sales_apr2022 FOR VALUES FROM ('2022-04-01') TO ('2022-04-15'); +CREATE TABLE sales_apr_2 PARTITION OF sales_apr2022 FOR VALUES FROM ('2022-04-15') TO ('2022-05-01'); +ALTER TABLE sales_range ATTACH PARTITION sales_apr2022 FOR VALUES FROM ('2022-04-01') TO ('2022-05-01'); + +CREATE TABLE sales_others PARTITION OF sales_range DEFAULT; + +-- ERROR: partition with name "sales_feb2022" is already used +ALTER TABLE sales_range MERGE PARTITIONS (sales_feb2022, sales_mar2022, sales_feb2022) INTO sales_feb_mar_apr2022; +-- ERROR: "sales_apr2022" is not a table +ALTER TABLE sales_range MERGE PARTITIONS (sales_feb2022, sales_mar2022, sales_apr2022) INTO sales_feb_mar_apr2022; +-- ERROR: can not merge partition "sales_mar2022" together with partition "sales_jan2022" +-- DETAIL: lower bound of partition "sales_mar2022" is not equal to the upper bound of partition "sales_jan2022" +-- (space between sections sales_jan2022 and sales_mar2022) +ALTER TABLE sales_range MERGE PARTITIONS (sales_jan2022, sales_mar2022) INTO sales_jan_mar2022; +-- ERROR: can not merge partition "sales_jan2022" together with partition "sales_dec2021" +-- DETAIL: lower bound of partition "sales_jan2022" is not equal to the upper bound of partition "sales_dec2021" +-- (space between sections sales_dec2021 and sales_jan2022) +ALTER TABLE sales_range MERGE PARTITIONS (sales_dec2021, sales_jan2022, sales_feb2022) INTO sales_dec_jan_feb2022; +-- ERROR: partition with name "sales_feb2022" is already used +ALTER TABLE sales_range MERGE PARTITIONS (sales_feb2022, sales_mar2022, partitions_merge_schema.sales_feb2022) INTO sales_feb_mar_apr2022; +--ERROR, sales_apr_2 already exists +ALTER TABLE sales_range MERGE PARTITIONS (sales_feb2022, sales_mar2022, sales_jan2022) INTO sales_apr_2; + +CREATE VIEW jan2022v as SELECT * FROM sales_jan2022; +ALTER TABLE sales_range MERGE PARTITIONS (sales_jan2022, sales_feb2022) INTO sales_dec_jan_feb2022; +DROP VIEW jan2022v; + +-- NO ERROR: test for custom partitions order, source partitions not in the search_path +SET search_path = partitions_merge_schema2, public; +ALTER TABLE partitions_merge_schema.sales_range MERGE PARTITIONS ( + partitions_merge_schema.sales_feb2022, + partitions_merge_schema.sales_mar2022, + partitions_merge_schema.sales_jan2022) INTO sales_jan_feb_mar2022; +SET search_path = partitions_merge_schema, public; + +PREPARE get_partition_info(regclass[]) AS +SELECT c.oid::pg_catalog.regclass, + c.relpersistence, + c.relkind, + i.inhdetachpending, + pg_catalog.pg_get_expr(c.relpartbound, c.oid) +FROM pg_catalog.pg_class c, 
pg_catalog.pg_inherits i +WHERE c.oid = i.inhrelid AND i.inhparent = ANY($1) +ORDER BY pg_catalog.pg_get_expr(c.relpartbound, c.oid) = 'DEFAULT', + c.oid::regclass::text COLLATE "C"; + +EXECUTE get_partition_info('{sales_range}'); + +DROP TABLE sales_range; + +-- +-- Add rows into partitioned table, then merge partitions +-- +CREATE TABLE sales_range (salesperson_id INT, salesperson_name VARCHAR(30), sales_amount INT, sales_date DATE) PARTITION BY RANGE (sales_date); +CREATE TABLE sales_jan2022 PARTITION OF sales_range FOR VALUES FROM ('2022-01-01') TO ('2022-02-01'); +CREATE TABLE sales_feb2022 PARTITION OF sales_range FOR VALUES FROM ('2022-02-01') TO ('2022-03-01'); +CREATE TABLE sales_mar2022 PARTITION OF sales_range FOR VALUES FROM ('2022-03-01') TO ('2022-04-01'); +CREATE TABLE sales_apr2022 PARTITION OF sales_range FOR VALUES FROM ('2022-04-01') TO ('2022-05-01'); +CREATE TABLE sales_others PARTITION OF sales_range DEFAULT; +CREATE INDEX sales_range_sales_date_idx ON sales_range USING btree (sales_date); + +INSERT INTO sales_range VALUES + (1, 'May', 1000, '2022-01-31'), + (2, 'Smirnoff', 500, '2022-02-10'), + (3, 'Ford', 2000, '2022-04-30'), + (4, 'Ivanov', 750, '2022-04-13'), + (5, 'Deev', 250, '2022-04-07'), + (6, 'Poirot', 150, '2022-02-11'), + (7, 'Li', 175, '2022-03-08'), + (8, 'Ericsson', 185, '2022-02-23'), + (9, 'Muller', 250, '2022-03-11'), + (10, 'Halder', 350, '2022-01-28'), + (11, 'Trump', 380, '2022-04-06'), + (12, 'Plato', 350, '2022-03-19'), + (13, 'Gandi', 377, '2022-01-09'), + (14, 'Smith', 510, '2022-05-04'); + +SELECT pg_catalog.pg_get_partkeydef('sales_range'::regclass); + +-- show partitions with conditions: +EXECUTE get_partition_info('{sales_range}'); + +-- check schema-qualified name of the new partition +ALTER TABLE sales_range MERGE PARTITIONS (sales_feb2022, sales_mar2022, sales_apr2022) INTO partitions_merge_schema2.sales_feb_mar_apr2022; + +-- show partitions with conditions: +EXECUTE get_partition_info('{sales_range}'); + +SELECT * FROM pg_indexes WHERE tablename = 'sales_feb_mar_apr2022' and schemaname = 'partitions_merge_schema2'; + +SELECT tableoid::regclass, * FROM sales_range ORDER BY tableoid::regclass::text COLLATE "C", salesperson_id; + +-- Use indexscan for testing indexes +SET enable_seqscan = OFF; + +EXPLAIN (COSTS OFF) SELECT * FROM partitions_merge_schema2.sales_feb_mar_apr2022 where sales_date > '2022-01-01'; +SELECT * FROM partitions_merge_schema2.sales_feb_mar_apr2022 where sales_date > '2022-01-01'; + +RESET enable_seqscan; + +DROP TABLE sales_range; + +-- +-- Merge some partitions into DEFAULT partition +-- +CREATE TABLE sales_range (salesperson_id INT, salesperson_name VARCHAR(30), sales_amount INT, sales_date DATE) PARTITION BY RANGE (sales_date); +CREATE TABLE sales_jan2022 PARTITION OF sales_range FOR VALUES FROM ('2022-01-01') TO ('2022-02-01'); +CREATE TABLE sales_feb2022 PARTITION OF sales_range FOR VALUES FROM ('2022-02-01') TO ('2022-03-01'); +CREATE TABLE sales_mar2022 PARTITION OF sales_range FOR VALUES FROM ('2022-03-01') TO ('2022-04-01'); +CREATE TABLE sales_apr2022 PARTITION OF sales_range FOR VALUES FROM ('2022-04-01') TO ('2022-05-01'); +CREATE TABLE sales_others PARTITION OF sales_range DEFAULT; +CREATE INDEX sales_range_sales_date_idx ON sales_range USING btree (sales_date); + +INSERT INTO sales_range VALUES + (1, 'May', 1000, '2022-01-31'), + (2, 'Smirnoff', 500, '2022-02-10'), + (3, 'Ford', 2000, '2022-04-30'), + (4, 'Ivanov', 750, '2022-04-13'), + (5, 'Deev', 250, '2022-04-07'), + (6, 'Poirot', 150, 
'2022-02-11'), + (7, 'Li', 175, '2022-03-08'), + (8, 'Ericsson', 185, '2022-02-23'), + (9, 'Muller', 250, '2022-03-11'), + (10, 'Halder', 350, '2022-01-28'), + (11, 'Trump', 380, '2022-04-06'), + (12, 'Plato', 350, '2022-03-19'), + (13, 'Gandi', 377, '2022-01-09'), + (14, 'Smith', 510, '2022-05-04'); + +-- Merge partitions (include DEFAULT partition) into partition with the same +-- name +ALTER TABLE sales_range MERGE PARTITIONS + (sales_jan2022, sales_mar2022, partitions_merge_schema.sales_others) INTO sales_others; + +SELECT * FROM sales_others ORDER BY salesperson_id; + +-- show partitions with conditions: +EXECUTE get_partition_info('{sales_range}'); + +DROP TABLE sales_range; + +-- +-- Test for: +-- * composite partition key; +-- * GENERATED column; +-- * column with DEFAULT value. +-- +CREATE TABLE sales_date (salesperson_name VARCHAR(30), sales_year INT, sales_month INT, sales_day INT, + sales_date VARCHAR(10) GENERATED ALWAYS AS + (LPAD(sales_year::text, 4, '0') || '.' || LPAD(sales_month::text, 2, '0') || '.' || LPAD(sales_day::text, 2, '0')) STORED, + sales_department VARCHAR(30) DEFAULT 'Sales department') + PARTITION BY RANGE (sales_year, sales_month, sales_day); + +CREATE TABLE sales_dec2022 PARTITION OF sales_date FOR VALUES FROM (2021, 12, 1) TO (2022, 1, 1); +CREATE TABLE sales_jan2022 PARTITION OF sales_date FOR VALUES FROM (2022, 1, 1) TO (2022, 2, 1); +CREATE TABLE sales_feb2022 PARTITION OF sales_date FOR VALUES FROM (2022, 2, 1) TO (2022, 3, 1); +CREATE TABLE sales_other PARTITION OF sales_date FOR VALUES FROM (2022, 3, 1) TO (MAXVALUE, MAXVALUE, MAXVALUE); + +INSERT INTO sales_date(salesperson_name, sales_year, sales_month, sales_day) VALUES + ('Manager1', 2021, 12, 7), + ('Manager2', 2021, 12, 8), + ('Manager3', 2022, 1, 1), + ('Manager1', 2022, 2, 4), + ('Manager2', 2022, 1, 2), + ('Manager3', 2022, 2, 1), + ('Manager1', 2022, 3, 3), + ('Manager2', 2022, 3, 4), + ('Manager3', 2022, 5, 1); + +SELECT tableoid::regclass, * FROM sales_date; + +ALTER TABLE sales_date MERGE PARTITIONS (sales_jan2022, sales_feb2022) INTO sales_jan_feb2022; + +INSERT INTO sales_date(salesperson_name, sales_year, sales_month, sales_day) VALUES + ('Manager1', 2022, 1, 10), + ('Manager2', 2022, 2, 10); + +SELECT tableoid::regclass, * FROM sales_date; +DROP TABLE sales_date; + +-- +-- Test: merge partitions of partitioned table with triggers +-- +CREATE TABLE salespeople(salesperson_id INT PRIMARY KEY, salesperson_name VARCHAR(30)) PARTITION BY RANGE (salesperson_id); + +CREATE TABLE salespeople01_10 PARTITION OF salespeople FOR VALUES FROM (1) TO (10); +CREATE TABLE salespeople10_20 PARTITION OF salespeople FOR VALUES FROM (10) TO (20); +CREATE TABLE salespeople20_30 PARTITION OF salespeople FOR VALUES FROM (20) TO (30); +CREATE TABLE salespeople30_40 PARTITION OF salespeople FOR VALUES FROM (30) TO (40); + +INSERT INTO salespeople VALUES (1, 'Poirot'); + +CREATE OR REPLACE FUNCTION after_insert_row_trigger() RETURNS trigger LANGUAGE 'plpgsql' AS $BODY$ +BEGIN + RAISE NOTICE 'trigger(%) called: action = %, when = %, level = %', TG_ARGV[0], TG_OP, TG_WHEN, TG_LEVEL; + RETURN NULL; +END; +$BODY$; + +CREATE TRIGGER salespeople_after_insert_statement_trigger + AFTER INSERT + ON salespeople + FOR EACH STATEMENT + EXECUTE PROCEDURE after_insert_row_trigger('salespeople'); + +CREATE TRIGGER salespeople_after_insert_row_trigger + AFTER INSERT + ON salespeople + FOR EACH ROW + EXECUTE PROCEDURE after_insert_row_trigger('salespeople'); + +-- 2 triggers should fire here (row + statement): +INSERT INTO 
salespeople VALUES (10, 'May'); +-- 1 trigger should fire here (row): +INSERT INTO salespeople10_20 VALUES (19, 'Ivanov'); + +ALTER TABLE salespeople MERGE PARTITIONS (salespeople10_20, salespeople20_30, salespeople30_40) INTO salespeople10_40; + +-- 2 triggers should fire here (row + statement): +INSERT INTO salespeople VALUES (20, 'Smirnoff'); +-- 1 trigger should fire here (row): +INSERT INTO salespeople10_40 VALUES (30, 'Ford'); + +SELECT * FROM salespeople01_10; +SELECT * FROM salespeople10_40; + +DROP TABLE salespeople; +DROP FUNCTION after_insert_row_trigger(); + +-- +-- Test: merge partitions with deleted columns +-- +CREATE TABLE salespeople(salesperson_id INT PRIMARY KEY, salesperson_name VARCHAR(30)) PARTITION BY RANGE (salesperson_id); + +CREATE TABLE salespeople01_10 PARTITION OF salespeople FOR VALUES FROM (1) TO (10); +-- Create partitions with some deleted columns: +CREATE TABLE salespeople10_20(d1 VARCHAR(30), salesperson_id INT PRIMARY KEY, salesperson_name VARCHAR(30)); +CREATE TABLE salespeople20_30(salesperson_id INT PRIMARY KEY, d2 INT, salesperson_name VARCHAR(30)); +CREATE TABLE salespeople30_40(salesperson_id INT PRIMARY KEY, d3 DATE, salesperson_name VARCHAR(30)); + +INSERT INTO salespeople10_20 VALUES ('dummy value 1', 19, 'Ivanov'); +INSERT INTO salespeople20_30 VALUES (20, 101, 'Smirnoff'); +INSERT INTO salespeople30_40 VALUES (31, now(), 'Popov'); + +ALTER TABLE salespeople10_20 DROP COLUMN d1; +ALTER TABLE salespeople20_30 DROP COLUMN d2; +ALTER TABLE salespeople30_40 DROP COLUMN d3; + +ALTER TABLE salespeople ATTACH PARTITION salespeople10_20 FOR VALUES FROM (10) TO (20); +ALTER TABLE salespeople ATTACH PARTITION salespeople20_30 FOR VALUES FROM (20) TO (30); +ALTER TABLE salespeople ATTACH PARTITION salespeople30_40 FOR VALUES FROM (30) TO (40); + +INSERT INTO salespeople VALUES + (1, 'Poirot'), + (10, 'May'), + (30, 'Ford'); + +ALTER TABLE salespeople MERGE PARTITIONS (salespeople10_20, salespeople20_30, salespeople30_40) INTO salespeople10_40; + +select * from salespeople; +select * from salespeople01_10; +select * from salespeople10_40; + +DROP TABLE salespeople; + +-- +-- Test: merge sub-partitions +-- +CREATE TABLE sales_range (salesperson_id INT, salesperson_name VARCHAR(30), sales_amount INT, sales_date DATE) PARTITION BY RANGE (sales_date); +CREATE TABLE sales_jan2022 PARTITION OF sales_range FOR VALUES FROM ('2022-01-01') TO ('2022-02-01'); +CREATE TABLE sales_feb2022 PARTITION OF sales_range FOR VALUES FROM ('2022-02-01') TO ('2022-03-01'); +CREATE TABLE sales_mar2022 PARTITION OF sales_range FOR VALUES FROM ('2022-03-01') TO ('2022-04-01'); + +CREATE TABLE sales_apr2022 (salesperson_id INT, salesperson_name VARCHAR(30), sales_amount INT, sales_date DATE) PARTITION BY RANGE (sales_date); +CREATE TABLE sales_apr2022_01_10 PARTITION OF sales_apr2022 FOR VALUES FROM ('2022-04-01') TO ('2022-04-10'); +CREATE TABLE sales_apr2022_10_20 PARTITION OF sales_apr2022 FOR VALUES FROM ('2022-04-10') TO ('2022-04-20'); +CREATE TABLE sales_apr2022_20_30 PARTITION OF sales_apr2022 FOR VALUES FROM ('2022-04-20') TO ('2022-05-01'); +ALTER TABLE sales_range ATTACH PARTITION sales_apr2022 FOR VALUES FROM ('2022-04-01') TO ('2022-05-01'); + +CREATE TABLE sales_others PARTITION OF sales_range DEFAULT; + +CREATE INDEX sales_range_sales_date_idx ON sales_range USING btree (sales_date); + +INSERT INTO sales_range VALUES + (1, 'May', 1000, '2022-01-31'), + (2, 'Smirnoff', 500, '2022-02-10'), + (3, 'Ford', 2000, '2022-04-30'), + (4, 'Ivanov', 750, '2022-04-13'), + (5, 
'Deev', 250, '2022-04-07'), + (6, 'Poirot', 150, '2022-02-11'), + (7, 'Li', 175, '2022-03-08'), + (8, 'Ericsson', 185, '2022-02-23'), + (9, 'Muller', 250, '2022-03-11'), + (10, 'Halder', 350, '2022-01-28'), + (11, 'Trump', 380, '2022-04-06'), + (12, 'Plato', 350, '2022-03-19'), + (13, 'Gandi', 377, '2022-01-09'), + (14, 'Smith', 510, '2022-05-04'); + +SELECT tableoid::regclass, * FROM sales_apr2022 ORDER BY tableoid::regclass::text COLLATE "C", salesperson_id; + +ALTER TABLE sales_apr2022 MERGE PARTITIONS (sales_apr2022_01_10, sales_apr2022_10_20, sales_apr2022_20_30) INTO sales_apr_all; + +SELECT tableoid::regclass, * FROM sales_apr2022 ORDER BY tableoid::regclass::text COLLATE "C", salesperson_id; + +DROP TABLE sales_range; + +-- +-- BY LIST partitioning +-- + +-- +-- Test: specific errors for BY LIST partitioning +-- +CREATE TABLE sales_list +(salesperson_id INT GENERATED ALWAYS AS IDENTITY, + salesperson_name VARCHAR(30), + sales_state VARCHAR(20), + sales_amount INT, + sales_date DATE) +PARTITION BY LIST (sales_state); +CREATE TABLE sales_nord PARTITION OF sales_list FOR VALUES IN ('Oslo', 'St. Petersburg', 'Helsinki'); +CREATE TABLE sales_west PARTITION OF sales_list FOR VALUES IN ('Lisbon', 'New York', 'Madrid'); +CREATE TABLE sales_east PARTITION OF sales_list FOR VALUES IN ('Bejing', 'Delhi', 'Vladivostok'); +CREATE TABLE sales_central PARTITION OF sales_list FOR VALUES IN ('Warsaw', 'Berlin', 'Kyiv'); +CREATE TABLE sales_others PARTITION OF sales_list DEFAULT; + + +CREATE TABLE sales_list2 (LIKE sales_list) PARTITION BY LIST (sales_state); +CREATE TABLE sales_nord2 PARTITION OF sales_list2 FOR VALUES IN ('Oslo', 'St. Petersburg', 'Helsinki'); +CREATE TABLE sales_others2 PARTITION OF sales_list2 DEFAULT; + + +CREATE TABLE sales_external (LIKE sales_list); +CREATE TABLE sales_external2 (vch VARCHAR(5)); + +-- ERROR: "sales_external" is not a partition of partitioned table "sales_list" +ALTER TABLE sales_list MERGE PARTITIONS (sales_west, sales_east, sales_external) INTO sales_all; +-- ERROR: "sales_external2" is not a partition of partitioned table "sales_list" +ALTER TABLE sales_list MERGE PARTITIONS (sales_west, sales_east, sales_external2) INTO sales_all; +-- ERROR: relation "sales_nord2" is not a partition of relation "sales_list" +ALTER TABLE sales_list MERGE PARTITIONS (sales_west, sales_nord2, sales_east) INTO sales_all; + +DROP TABLE sales_external2; +DROP TABLE sales_external; +DROP TABLE sales_list2; +DROP TABLE sales_list; + +-- +-- Test: BY LIST partitioning, MERGE PARTITIONS with data +-- +CREATE TABLE sales_list +(salesperson_id INT GENERATED ALWAYS AS IDENTITY, + salesperson_name VARCHAR(30), + sales_state VARCHAR(20), + sales_amount INT, + sales_date DATE) +PARTITION BY LIST (sales_state); + +CREATE INDEX sales_list_salesperson_name_idx ON sales_list USING btree (salesperson_name); +CREATE INDEX sales_list_sales_state_idx ON sales_list USING btree (sales_state); + +CREATE TABLE sales_nord PARTITION OF sales_list FOR VALUES IN ('Oslo', 'St. 
Petersburg', 'Helsinki'); +CREATE TABLE sales_west PARTITION OF sales_list FOR VALUES IN ('Lisbon', 'New York', 'Madrid'); +CREATE TABLE sales_east PARTITION OF sales_list FOR VALUES IN ('Bejing', 'Delhi', 'Vladivostok'); +CREATE TABLE sales_central PARTITION OF sales_list FOR VALUES IN ('Warsaw', 'Berlin', 'Kyiv'); +CREATE TABLE sales_others PARTITION OF sales_list DEFAULT; + +INSERT INTO sales_list (salesperson_name, sales_state, sales_amount, sales_date) VALUES + ('Trump', 'Bejing', 1000, '2022-03-01'), + ('Smirnoff', 'New York', 500, '2022-03-03'), + ('Ford', 'St. Petersburg', 2000, '2022-03-05'), + ('Ivanov', 'Warsaw', 750, '2022-03-04'), + ('Deev', 'Lisbon', 250, '2022-03-07'), + ('Poirot', 'Berlin', 1000, '2022-03-01'), + ('May', 'Helsinki', 1200, '2022-03-06'), + ('Li', 'Vladivostok', 1150, '2022-03-09'), + ('May', 'Helsinki', 1200, '2022-03-11'), + ('Halder', 'Oslo', 800, '2022-03-02'), + ('Muller', 'Madrid', 650, '2022-03-05'), + ('Smith', 'Kyiv', 350, '2022-03-10'), + ('Gandi', 'Warsaw', 150, '2022-03-08'), + ('Plato', 'Lisbon', 950, '2022-03-05'); + +-- show partitions with conditions: +EXECUTE get_partition_info('{sales_list}'); + +ALTER TABLE sales_list MERGE PARTITIONS (sales_west, sales_east, sales_central) INTO sales_all; + +-- show partitions with conditions: +EXECUTE get_partition_info('{sales_list}'); + +SELECT tableoid::regclass, * FROM sales_list ORDER BY tableoid::regclass::text COLLATE "C", salesperson_id; + +-- Use indexscan for testing indexes after merging partitions +SET enable_seqscan = OFF; + +EXPLAIN (COSTS OFF) SELECT * FROM sales_all WHERE sales_state = 'Warsaw'; +SELECT * FROM sales_all WHERE sales_state = 'Warsaw'; +EXPLAIN (COSTS OFF) SELECT * FROM sales_list WHERE sales_state = 'Warsaw'; +SELECT * FROM sales_list WHERE sales_state = 'Warsaw'; +EXPLAIN (COSTS OFF) SELECT * FROM sales_list WHERE salesperson_name = 'Ivanov'; +SELECT * FROM sales_list WHERE salesperson_name = 'Ivanov'; + +RESET enable_seqscan; + +DROP TABLE sales_list; + +-- +-- Try to MERGE partitions of another table. +-- +CREATE TABLE t1 (i int, a int, b int, c int) PARTITION BY RANGE (a, b); +CREATE TABLE t1p1 PARTITION OF t1 FOR VALUES FROM (1, 1) TO (1, 2); +CREATE TABLE t2 (i int, t text) PARTITION BY RANGE (t); +CREATE TABLE t2pa PARTITION OF t2 FOR VALUES FROM ('A') TO ('C'); +CREATE TABLE t3 (i int, t text); + +-- ERROR: relation "t1p1" is not a partition of relation "t2" +ALTER TABLE t2 MERGE PARTITIONS (t1p1, t2pa) INTO t2p; +-- ERROR: "t3" is not a partition of partitioned table "t2" +ALTER TABLE t2 MERGE PARTITIONS (t2pa, t3) INTO t2p; + +DROP TABLE t3; +DROP TABLE t2; +DROP TABLE t1; + + +-- +-- Check the partition index name if the partition name is the same as one +-- of the merged partitions. +-- +CREATE TABLE t (i int, PRIMARY KEY(i)) PARTITION BY RANGE (i); + +CREATE TABLE tp_0_1 PARTITION OF t FOR VALUES FROM (0) TO (1); +CREATE TABLE tp_1_2 PARTITION OF t FOR VALUES FROM (1) TO (2); + +CREATE INDEX tidx ON t(i); +ALTER TABLE t MERGE PARTITIONS (tp_1_2, tp_0_1) INTO tp_1_2; + +-- Indexname values should be 'tp_1_2_pkey' and 'tp_1_2_i_idx'. +-- \d+ tp_1_2 + +DROP TABLE t; + +-- +-- Try to MERGE partitions of temporary table. 
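+-- (Illustrative note, not part of the upstream test: partitions of a temporary table +-- must themselves be temporary, which is why the merge targets below are +-- schema-qualified with pg_temp; INTO pg_temp.tp_0_2 succeeds, while the unqualified +-- INTO tp_0_4 resolves to a permanent schema and fails.)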
+-- +BEGIN; +SHOW search_path; +CREATE TEMP TABLE t (i int) PARTITION BY RANGE (i) ON COMMIT DROP; +CREATE TEMP TABLE tp_0_1 PARTITION OF t FOR VALUES FROM (0) TO (1); +CREATE TEMP TABLE tp_1_2 PARTITION OF t FOR VALUES FROM (1) TO (2); +CREATE TEMP TABLE tp_2_3 PARTITION OF t FOR VALUES FROM (2) TO (3); +CREATE TEMP TABLE tp_3_4 PARTITION OF t FOR VALUES FROM (3) TO (4); + +ALTER TABLE t MERGE PARTITIONS (tp_0_1, tp_1_2) INTO pg_temp.tp_0_2; +ALTER TABLE t MERGE PARTITIONS (tp_0_2, tp_2_3) INTO pg_temp.tp_0_3; + +-- Partition should be temporary. +EXECUTE get_partition_info('{t}'); +-- ERROR: cannot create a permanent relation as partition of temporary relation "t" +ALTER TABLE t MERGE PARTITIONS (tp_0_3, tp_3_4) INTO tp_0_4; +ROLLBACK; + +-- +-- Try mixing permanent and temporary partitions. +-- +BEGIN; +SET search_path = partitions_merge_schema, pg_temp, public; +CREATE TABLE t (i int) PARTITION BY RANGE (i); +CREATE TABLE tp_0_1 PARTITION OF t FOR VALUES FROM (0) TO (1); +CREATE TABLE tp_1_2 PARTITION OF t FOR VALUES FROM (1) TO (2); + +SELECT c.oid::pg_catalog.regclass, c.relpersistence FROM pg_catalog.pg_class c WHERE c.oid = 't'::regclass; +EXECUTE get_partition_info('{t}'); +SAVEPOINT s; + +SET search_path = pg_temp, partitions_merge_schema, public; +-- Can't merge persistent partitions into a temporary partition +ALTER TABLE t MERGE PARTITIONS (tp_0_1, tp_1_2) INTO tp_0_2; + +ROLLBACK TO SAVEPOINT s; +SET search_path = partitions_merge_schema, public; +-- Can't merge persistent partitions into a temporary partition +ALTER TABLE t MERGE PARTITIONS (tp_0_1, tp_1_2) INTO pg_temp.tp_0_2; +ROLLBACK; + +BEGIN; +SET search_path = pg_temp, partitions_merge_schema, public; +CREATE TABLE t (i int) PARTITION BY RANGE (i); +CREATE TABLE tp_0_1 PARTITION OF t FOR VALUES FROM (0) TO (1); +CREATE TABLE tp_1_2 PARTITION OF t FOR VALUES FROM (1) TO (2); + +SELECT c.oid::pg_catalog.regclass, c.relpersistence FROM pg_catalog.pg_class c WHERE c.oid = 't'::regclass; +EXECUTE get_partition_info('{t}'); + +SET search_path = partitions_merge_schema, pg_temp, public; + +-- Can't merge temporary partitions into a persistent partition +ALTER TABLE t MERGE PARTITIONS (tp_0_1, tp_1_2) INTO tp_0_2; +ROLLBACK; + +DEALLOCATE get_partition_info; + +-- Check the new partition inherits parent's tablespace +SET search_path = partitions_merge_schema, public; +CREATE TABLE t (i int PRIMARY KEY USING INDEX TABLESPACE regress_tblspace) + PARTITION BY RANGE (i) TABLESPACE regress_tblspace; +CREATE TABLE tp_0_1 PARTITION OF t FOR VALUES FROM (0) TO (1); +CREATE TABLE tp_1_2 PARTITION OF t FOR VALUES FROM (1) TO (2); +ALTER TABLE t MERGE PARTITIONS (tp_0_1, tp_1_2) INTO tp_0_2; +SELECT tablename, tablespace FROM pg_tables + WHERE tablename IN ('t', 'tp_0_2') AND schemaname = 'partitions_merge_schema' + ORDER BY tablename COLLATE "C", tablespace COLLATE "C"; +SELECT tablename, indexname, tablespace FROM pg_indexes + WHERE tablename IN ('t', 'tp_0_2') AND schemaname = 'partitions_merge_schema' + ORDER BY tablename COLLATE "C", indexname COLLATE "C", tablespace COLLATE "C"; +DROP TABLE t; + +-- Check the new partition inherits parent's table access method +SET search_path = partitions_merge_schema, public; +CREATE ACCESS METHOD partitions_merge_heap TYPE TABLE HANDLER heap_tableam_handler; +CREATE TABLE t (i int) PARTITION BY RANGE (i) USING partitions_merge_heap; +CREATE TABLE tp_0_1 PARTITION OF t FOR VALUES FROM (0) TO (1); +CREATE TABLE tp_1_2 PARTITION OF t FOR VALUES FROM (1) TO (2); +ALTER TABLE t MERGE PARTITIONS 
(tp_0_1, tp_1_2) INTO tp_0_2; +SELECT c.relname, a.amname +FROM pg_class c JOIN pg_am a ON c.relam = a.oid +WHERE c.oid IN ('t'::regclass, 'tp_0_2'::regclass) +ORDER BY c.relname COLLATE "C"; +DROP TABLE t; +DROP ACCESS METHOD partitions_merge_heap; + +-- Test permission checks. The user needs to own the parent table and all +-- the merging partitions to do the merge. +CREATE ROLE regress_partition_merge_alice; +CREATE ROLE regress_partition_merge_bob; +GRANT ALL ON SCHEMA partitions_merge_schema TO regress_partition_merge_alice; +GRANT ALL ON SCHEMA partitions_merge_schema TO regress_partition_merge_bob; + +SET SESSION AUTHORIZATION regress_partition_merge_alice; +CREATE TABLE t (i int) PARTITION BY RANGE (i); +CREATE TABLE tp_0_1 PARTITION OF t FOR VALUES FROM (0) TO (1); +CREATE TABLE tp_1_2 PARTITION OF t FOR VALUES FROM (1) TO (2); + +SET SESSION AUTHORIZATION regress_partition_merge_bob; +-- ERROR: must be owner of table t +ALTER TABLE t MERGE PARTITIONS (tp_0_1, tp_1_2) INTO tp_0_2; +RESET SESSION AUTHORIZATION; + +ALTER TABLE t OWNER TO regress_partition_merge_bob; +SET SESSION AUTHORIZATION regress_partition_merge_bob; +-- ERROR: must be owner of table tp_0_1 +ALTER TABLE t MERGE PARTITIONS (tp_0_1, tp_1_2) INTO tp_0_2; +RESET SESSION AUTHORIZATION; + +ALTER TABLE tp_0_1 OWNER TO regress_partition_merge_bob; +SET SESSION AUTHORIZATION regress_partition_merge_bob; +-- ERROR: must be owner of table tp_1_2 +ALTER TABLE t MERGE PARTITIONS (tp_0_1, tp_1_2) INTO tp_0_2; +RESET SESSION AUTHORIZATION; + +ALTER TABLE tp_1_2 OWNER TO regress_partition_merge_bob; +SET SESSION AUTHORIZATION regress_partition_merge_bob; +-- Ok: +ALTER TABLE t MERGE PARTITIONS (tp_0_1, tp_1_2) INTO tp_0_2; +RESET SESSION AUTHORIZATION; + +DROP TABLE t; + +-- Test: we can't merge partitions with different owners +CREATE TABLE tp_0_1(i int); +ALTER TABLE tp_0_1 OWNER TO regress_partition_merge_alice; +CREATE TABLE tp_1_2(i int); +ALTER TABLE tp_1_2 OWNER TO regress_partition_merge_bob; + +CREATE TABLE t (i int) PARTITION BY RANGE (i); + +ALTER TABLE t ATTACH PARTITION tp_0_1 FOR VALUES FROM (0) TO (1); +ALTER TABLE t ATTACH PARTITION tp_1_2 FOR VALUES FROM (1) TO (2); + +-- Owner is 'regress_partition_merge_alice': +-- \dt tp_0_1 +-- Owner is 'regress_partition_merge_bob': +-- \dt tp_1_2 + +-- ERROR: partitions being merged have different owners +ALTER TABLE t MERGE PARTITIONS (tp_0_1, tp_1_2) INTO tp_0_2; + +DROP TABLE t; +REVOKE ALL ON SCHEMA partitions_merge_schema FROM regress_partition_merge_alice; +REVOKE ALL ON SCHEMA partitions_merge_schema FROM regress_partition_merge_bob; +DROP ROLE regress_partition_merge_alice; +DROP ROLE regress_partition_merge_bob; + + +-- Test for hash partitioned table +CREATE TABLE t (i int) PARTITION BY HASH(i); +CREATE TABLE tp1 PARTITION OF t FOR VALUES WITH (MODULUS 2, REMAINDER 0); +CREATE TABLE tp2 PARTITION OF t FOR VALUES WITH (MODULUS 2, REMAINDER 1); + +-- ERROR: partition of hash-partitioned table cannot be merged +ALTER TABLE t MERGE PARTITIONS (tp1, tp2) INTO tp3; + +-- ERROR: list of partitions to be merged should include at least two partitions +ALTER TABLE t MERGE PARTITIONS (tp1) INTO tp3; + +DROP TABLE t; + + +-- Test for merged partition properties: +-- * STATISTICS is empty +-- * COMMENT is empty +-- * DEFAULTS are the same as DEFAULTS for partitioned table +-- * STORAGE is the same as STORAGE for partitioned table +-- * GENERATED and CONSTRAINTS are the same as GENERATED and CONSTRAINTS for partitioned table +-- * TRIGGERS are the same as TRIGGERS for 
partitioned table +-- \set HIDE_TOAST_COMPRESSION false + +CREATE TABLE t +(i int NOT NULL, + t text STORAGE EXTENDED COMPRESSION pglz DEFAULT 'default_t', + b bigint, + d date GENERATED ALWAYS as ('2022-01-01') STORED) PARTITION BY RANGE (abs(i)); +COMMENT ON COLUMN t.i IS 't1.i'; + +CREATE TABLE tp_0_1 +(i int NOT NULL, + t text STORAGE MAIN DEFAULT 'default_tp_0_1', + b bigint, + d date GENERATED ALWAYS as ('2022-02-02') STORED); +ALTER TABLE t ATTACH PARTITION tp_0_1 FOR VALUES FROM (0) TO (1); +COMMENT ON COLUMN tp_0_1.i IS 'tp_0_1.i'; + +CREATE TABLE tp_1_2 +(i int NOT NULL, + t text STORAGE MAIN DEFAULT 'default_tp_1_2', + b bigint, + d date GENERATED ALWAYS as ('2022-03-03') STORED); +ALTER TABLE t ATTACH PARTITION tp_1_2 FOR VALUES FROM (1) TO (2); +COMMENT ON COLUMN tp_1_2.i IS 'tp_1_2.i'; + +CREATE STATISTICS t_stat (DEPENDENCIES) on i, b from t; +CREATE STATISTICS tp_0_1_stat (DEPENDENCIES) on i, b from tp_0_1; +CREATE STATISTICS tp_1_2_stat (DEPENDENCIES) on i, b from tp_1_2; + +ALTER TABLE t ADD CONSTRAINT t_b_check CHECK (b > 0); +ALTER TABLE t ADD CONSTRAINT t_b_check1 CHECK (b > 0) NOT ENFORCED; +ALTER TABLE t ADD CONSTRAINT t_b_check2 CHECK (b > 0) NOT VALID; +ALTER TABLE t ADD CONSTRAINT t_b_nn NOT NULL b NOT VALID; + +INSERT INTO tp_0_1(i, t, b) VALUES(0, DEFAULT, 1); +INSERT INTO tp_1_2(i, t, b) VALUES(1, DEFAULT, 2); +CREATE OR REPLACE FUNCTION trigger_function() RETURNS trigger LANGUAGE 'plpgsql' AS +$BODY$ +BEGIN + RAISE NOTICE 'trigger(%) called: action = %, when = %, level = %', TG_ARGV[0], TG_OP, TG_WHEN, TG_LEVEL; + RETURN new; +END; +$BODY$; + +CREATE TRIGGER t_before_insert_row_trigger BEFORE INSERT ON t FOR EACH ROW + EXECUTE PROCEDURE trigger_function('t'); +CREATE TRIGGER tp_0_1_before_insert_row_trigger BEFORE INSERT ON tp_0_1 FOR EACH ROW + EXECUTE PROCEDURE trigger_function('tp_0_1'); +CREATE TRIGGER tp_1_2_before_insert_row_trigger BEFORE INSERT ON tp_1_2 FOR EACH ROW + EXECUTE PROCEDURE trigger_function('tp_1_2'); + +-- \d+ tp_0_1 +ALTER TABLE t MERGE PARTITIONS (tp_0_1, tp_1_2) INTO tp_0_1; +-- \d+ tp_0_1 + +INSERT INTO t(i, t, b) VALUES(1, DEFAULT, 3); +SELECT tableoid::regclass, * FROM t ORDER BY b; +DROP TABLE t; +DROP FUNCTION trigger_function(); +-- \set HIDE_TOAST_COMPRESSION true + + +-- Test MERGE PARTITIONS with not valid foreign key constraint +CREATE TABLE t (i INT PRIMARY KEY) PARTITION BY RANGE (i); +CREATE TABLE tp_0_1 PARTITION OF t FOR VALUES FROM (0) TO (1); +CREATE TABLE tp_1_2 PARTITION OF t FOR VALUES FROM (1) TO (2); +INSERT INTO t VALUES (0), (1); +CREATE TABLE t_fk (i INT); +INSERT INTO t_fk VALUES (1), (2); +ALTER TABLE t_fk ADD CONSTRAINT t_fk_i_fkey FOREIGN KEY (i) REFERENCES t NOT VALID; +ALTER TABLE t MERGE PARTITIONS (tp_0_1, tp_1_2) INTO tp_0_2; + +-- Should be NOT VALID FOREIGN KEY +-- \d tp_0_2 +-- ERROR: insert or update on table "t_fk" violates foreign key constraint "t_fk_i_fkey" +ALTER TABLE t_fk VALIDATE CONSTRAINT t_fk_i_fkey; + +DROP TABLE t_fk; +DROP TABLE t; + +-- Test MERGE PARTITIONS with not enforced foreign key constraint +CREATE TABLE t (i INT PRIMARY KEY) PARTITION BY RANGE (i); +CREATE TABLE tp_0_1 PARTITION OF t FOR VALUES FROM (0) TO (1); +CREATE TABLE tp_1_2 PARTITION OF t FOR VALUES FROM (1) TO (2); +INSERT INTO t VALUES (0), (1); +CREATE TABLE t_fk (i INT); +INSERT INTO t_fk VALUES (1), (2); + +ALTER TABLE t_fk ADD CONSTRAINT t_fk_i_fkey FOREIGN KEY (i) REFERENCES t NOT ENFORCED; +ALTER TABLE t MERGE PARTITIONS (tp_0_1, tp_1_2) INTO tp_0_2; + +-- Should be NOT ENFORCED FOREIGN KEY +-- \d tp_0_2 +-- 
ERROR: insert or update on table "t_fk" violates foreign key constraint "t_fk_i_fkey" +ALTER TABLE t_fk ALTER CONSTRAINT t_fk_i_fkey ENFORCED; + +DROP TABLE t_fk; +DROP TABLE t; + + +-- Test for recomputation of stored generated columns. +CREATE TABLE t (i int, tab_id int generated always as (tableoid) stored) PARTITION BY RANGE (i); +CREATE TABLE tp_0_1 PARTITION OF t FOR VALUES FROM (0) TO (1); +CREATE TABLE tp_1_2 PARTITION OF t FOR VALUES FROM (1) TO (2); +ALTER TABLE t ADD CONSTRAINT cc CHECK(tableoid <> 123456789); +INSERT INTO t VALUES (0), (1); + +-- Should be 0 because partition identifier for row with i=0 is different from +-- partition identifier for row with i=1. +SELECT count(*) FROM t WHERE i = 0 AND tab_id IN (SELECT tab_id FROM t WHERE i = 1); + +-- "tab_id" column (stored generated column) with "tableoid" attribute requires +-- recomputation here. +ALTER TABLE t MERGE PARTITIONS (tp_0_1, tp_1_2) INTO tp_0_2; + +-- Should be 1 because partition identifier for row with i=0 is the same as +-- partition identifier for row with i=1. +SELECT count(*) FROM t WHERE i = 0 AND tab_id IN (SELECT tab_id FROM t WHERE i = 1); + +DROP TABLE t; + + +-- Test for generated columns (different order of columns in partitioned table +-- and partitions). +CREATE TABLE t (i int, g int GENERATED ALWAYS AS (i + tableoid::int)) PARTITION BY RANGE (i); +CREATE TABLE tp_1 (g int GENERATED ALWAYS AS (i + tableoid::int), i int); +CREATE TABLE tp_2 (g int GENERATED ALWAYS AS (i + tableoid::int), i int); +ALTER TABLE t ATTACH PARTITION tp_1 FOR VALUES FROM (-1) TO (10); +ALTER TABLE t ATTACH PARTITION tp_2 FOR VALUES FROM (10) TO (20); +ALTER TABLE t ADD CHECK (g > 0); +ALTER TABLE t ADD CHECK (i > 0); +INSERT INTO t VALUES (5), (15); + +ALTER TABLE t MERGE PARTITIONS (tp_1, tp_2) INTO tp_12; + +INSERT INTO t VALUES (16); +-- ERROR: new row for relation "tp_12" violates check constraint "t_i_check" +INSERT INTO t VALUES (0); +-- Should be 3 rows: (5), (15), (16): +SELECT i FROM t ORDER BY i; +-- Should be 1 because for the same tableoid (15 + tableoid) = (5 + tableoid) + 10: +SELECT count(*) FROM t WHERE i = 15 AND g IN (SELECT g + 10 FROM t WHERE i = 5); + +DROP TABLE t; + + +RESET search_path; + +-- +DROP SCHEMA partitions_merge_schema; +DROP SCHEMA partitions_merge_schema2; diff --git a/crates/squawk_parser/tests/data/regression_suite/partition_prune.sql b/crates/squawk_parser/tests/data/regression_suite/partition_prune.sql index dc4af9be..e812fb8f 100644 --- a/crates/squawk_parser/tests/data/regression_suite/partition_prune.sql +++ b/crates/squawk_parser/tests/data/regression_suite/partition_prune.sql @@ -1287,6 +1287,7 @@ create table hp_prefix_test (a int, b int, c int, d int) -- create 8 partitions select 'create table hp_prefix_test_p' || x::text || ' partition of hp_prefix_test for values with (modulus 8, remainder ' || x::text || ');' from generate_Series(0,7) x; +-- \gexec -- insert 16 rows, one row for each test to perform. insert into hp_prefix_test @@ -1305,12 +1306,14 @@ from -- and equality quals. This may seem a little excessive, but there have been -- a number of bugs in this area over the years. We make use of row only -- output to reduce the size of the expected results. 
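+-- (Illustrative note: \t and \gexec in this hunk are psql meta-commands, not SQL: +-- \t toggles tuples-only output and \gexec runs each cell of the preceding query's +-- result as a new statement; they are presumably kept commented out so this copy of +-- the file stays plain SQL for the parser test suite.)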
+-- \t on select 'explain (costs off) select tableoid::regclass,* from hp_prefix_test where ' || string_agg(c.colname || case when g.s & (1 << c.colpos) = 0 then ' is null' else ' = ' || (colpos+1)::text end, ' and ' order by c.colpos) from (values('a',0),('b',1),('c',2),('d',3)) c(colname, colpos), generate_Series(0,15) g(s) group by g.s order by g.s; +-- \gexec -- And ensure we get exactly 1 row from each. Again, all 16 possible combinations. select @@ -1319,6 +1322,8 @@ select from (values('a',0),('b',1),('c',2),('d',3)) c(colname, colpos), generate_Series(0,15) g(s) group by g.s order by g.s; +-- \gexec +-- \t off drop table hp_prefix_test; @@ -1366,12 +1371,12 @@ create view part_abc_view as select * from part_abc where b <> 'a' with check op prepare update_part_abc_view as update part_abc_view set b = $2 where a = $1 returning *; -- Only the unpruned partition should be shown in the list of relations to be -- updated -explain (costs off) execute update_part_abc_view (1, 'd'); +explain (verbose, costs off) execute update_part_abc_view (1, 'd'); execute update_part_abc_view (1, 'd'); -explain (costs off) execute update_part_abc_view (2, 'a'); +explain (verbose, costs off) execute update_part_abc_view (2, 'a'); execute update_part_abc_view (2, 'a'); -- All pruned. -explain (costs off) execute update_part_abc_view (3, 'a'); +explain (verbose, costs off) execute update_part_abc_view (3, 'a'); execute update_part_abc_view (3, 'a'); deallocate update_part_abc_view; diff --git a/crates/squawk_parser/tests/data/regression_suite/partition_split.sql b/crates/squawk_parser/tests/data/regression_suite/partition_split.sql new file mode 100644 index 00000000..5140ca4d --- /dev/null +++ b/crates/squawk_parser/tests/data/regression_suite/partition_split.sql @@ -0,0 +1,1134 @@ +-- +-- PARTITION_SPLIT +-- Tests for "ALTER TABLE ... SPLIT PARTITION ..." 
command +-- + +CREATE SCHEMA partition_split_schema; +CREATE SCHEMA partition_split_schema2; +SET search_path = partition_split_schema, public; + +-- +-- BY RANGE partitioning +-- + +-- +-- Test for error codes +-- +CREATE TABLE sales_range (salesperson_id int, sales_date date) PARTITION BY RANGE (sales_date); +CREATE TABLE sales_jan2022 PARTITION OF sales_range FOR VALUES FROM ('2022-01-01') TO ('2022-02-01'); +CREATE TABLE sales_feb_mar_apr2022 PARTITION OF sales_range FOR VALUES FROM ('2022-02-01') TO ('2022-05-01'); +CREATE TABLE sales_others PARTITION OF sales_range DEFAULT; + +-- ERROR: relation "sales_xxx" does not exist +ALTER TABLE sales_range SPLIT PARTITION sales_xxx INTO + (PARTITION sales_feb2022 FOR VALUES FROM ('2022-02-01') TO ('2022-03-01'), + PARTITION sales_mar2022 FOR VALUES FROM ('2022-03-01') TO ('2022-04-01'), + PARTITION sales_apr2022 FOR VALUES FROM ('2022-04-01') TO ('2022-05-01')); + +-- ERROR: relation "sales_jan2022" already exists +ALTER TABLE sales_range SPLIT PARTITION sales_feb_mar_apr2022 INTO + (PARTITION sales_jan2022 FOR VALUES FROM ('2022-02-01') TO ('2022-03-01'), + PARTITION sales_mar2022 FOR VALUES FROM ('2022-03-01') TO ('2022-04-01'), + PARTITION sales_apr2022 FOR VALUES FROM ('2022-04-01') TO ('2022-05-01')); + +-- ERROR: invalid bound specification for a range partition +ALTER TABLE sales_range SPLIT PARTITION sales_feb_mar_apr2022 INTO + (PARTITION sales_jan2022 FOR VALUES IN ('2022-05-01', '2022-06-01'), + PARTITION sales_mar2022 FOR VALUES FROM ('2022-03-01') TO ('2022-04-01'), + PARTITION sales_apr2022 FOR VALUES FROM ('2022-04-01') TO ('2022-05-01')); + +-- ERROR: empty range bound specified for partition "sales_mar2022" +ALTER TABLE sales_range SPLIT PARTITION sales_feb_mar_apr2022 INTO + (PARTITION sales_feb2022 FOR VALUES FROM ('2022-02-01') TO ('2022-03-01'), + PARTITION sales_mar2022 FOR VALUES FROM ('2022-03-01') TO ('2022-02-01'), + PARTITION sales_apr2022 FOR VALUES FROM ('2022-04-01') TO ('2022-05-01')); + +-- ERROR: list of split partitions should contain at least two items +ALTER TABLE sales_range SPLIT PARTITION sales_feb_mar_apr2022 INTO + (PARTITION sales_feb2022 FOR VALUES FROM ('2022-02-01') TO ('2022-10-01')); + +-- ERROR: lower bound of partition "sales_feb2022" is not equal to lower bound of split partition "sales_feb_mar_apr2022" +-- HINT: ALTER TABLE ... 
SPLIT PARTITION require combined bounds of new partitions must exactly match the bound of the split partition +ALTER TABLE sales_range SPLIT PARTITION sales_feb_mar_apr2022 INTO + (PARTITION sales_feb2022 FOR VALUES FROM ('2022-01-01') TO ('2022-03-01'), + PARTITION sales_mar2022 FOR VALUES FROM ('2022-03-01') TO ('2022-04-01'), + PARTITION sales_apr2022 FOR VALUES FROM ('2022-04-01') TO ('2022-05-01')); + +-- ERROR: partition with name "sales_feb_mar_apr2022" is already used +-- (We can create a partition with the same name as the split partition, but can't create two partitions with the same name) +ALTER TABLE sales_range SPLIT PARTITION sales_feb_mar_apr2022 INTO + (PARTITION sales_feb_mar_apr2022 FOR VALUES FROM ('2022-02-01') TO ('2022-03-01'), + PARTITION sales_feb_mar_apr2022 FOR VALUES FROM ('2022-03-01') TO ('2022-04-01'), + PARTITION sales_apr2022 FOR VALUES FROM ('2022-04-01') TO ('2022-05-01')); + +-- ERROR: partition with name "sales_feb2022" is already used +ALTER TABLE sales_range SPLIT PARTITION sales_feb_mar_apr2022 INTO + (PARTITION sales_feb2022 FOR VALUES FROM ('2022-02-01') TO ('2022-03-01'), + PARTITION sales_feb2022 FOR VALUES FROM ('2022-03-01') TO ('2022-04-01'), + PARTITION sales_apr2022 FOR VALUES FROM ('2022-04-01') TO ('2022-05-01')); + +-- ERROR: partition with name "sales_feb2022" is already used +ALTER TABLE sales_range SPLIT PARTITION sales_feb_mar_apr2022 INTO + (PARTITION sales_feb2022 FOR VALUES FROM ('2022-02-01') TO ('2022-03-01'), + PARTITION partition_split_schema.sales_feb2022 FOR VALUES FROM ('2022-03-01') TO ('2022-04-01'), + PARTITION sales_apr2022 FOR VALUES FROM ('2022-04-01') TO ('2022-05-01')); + +-- ERROR: ALTER action SPLIT PARTITION cannot be performed on relation "sales_feb_mar_apr2022" +-- DETAIL: This operation is not supported for tables. +ALTER TABLE sales_feb_mar_apr2022 SPLIT PARTITION sales_feb_mar_apr2022 INTO + (PARTITION sales_jan2022 FOR VALUES FROM ('2022-02-01') TO ('2022-03-01'), + PARTITION sales_feb2022 FOR VALUES FROM ('2022-03-01') TO ('2022-04-01'), + PARTITION sales_apr2022 FOR VALUES FROM ('2022-04-01') TO ('2022-05-01')); + +-- ERROR: upper bound of partition "sales_apr2022" is not equal to upper bound of split partition "sales_feb_mar_apr2022" +-- HINT: ALTER TABLE ... SPLIT PARTITION require combined bounds of new partitions must exactly match the bound of the split partition +ALTER TABLE sales_range SPLIT PARTITION sales_feb_mar_apr2022 INTO + (PARTITION sales_feb2022 FOR VALUES FROM ('2022-02-01') TO ('2022-03-01'), + PARTITION sales_mar2022 FOR VALUES FROM ('2022-03-01') TO ('2022-04-01'), + PARTITION sales_apr2022 FOR VALUES FROM ('2022-04-01') TO ('2022-06-01')); + +-- ERROR: can not split to partition "sales_mar2022" together with partition "sales_feb2022" +ALTER TABLE sales_range SPLIT PARTITION sales_feb_mar_apr2022 INTO + (PARTITION sales_feb2022 FOR VALUES FROM ('2022-02-01') TO ('2022-03-01'), + PARTITION sales_mar2022 FOR VALUES FROM ('2022-02-01') TO ('2022-04-01'), + PARTITION sales_apr2022 FOR VALUES FROM ('2022-04-01') TO ('2022-05-01')); + +-- Tests for spaces between partitions; they should be executed without a DEFAULT partition +ALTER TABLE sales_range DETACH PARTITION sales_others; + +-- ERROR: lower bound of partition "sales_feb2022" is not equal to lower bound of split partition "sales_feb_mar_apr2022" +-- HINT: ALTER TABLE ... 
SPLIT PARTITION require combined bounds of new partitions must exactly match the bound of the split partition +ALTER TABLE sales_range SPLIT PARTITION sales_feb_mar_apr2022 INTO + (PARTITION sales_feb2022 FOR VALUES FROM ('2022-02-02') TO ('2022-03-01'), + PARTITION sales_mar2022 FOR VALUES FROM ('2022-03-01') TO ('2022-04-01'), + PARTITION sales_apr2022 FOR VALUES FROM ('2022-04-01') TO ('2022-05-01')); + +-- Check the source partition not in the search path +SET search_path = partition_split_schema2, public; +ALTER TABLE partition_split_schema.sales_range +SPLIT PARTITION partition_split_schema.sales_feb_mar_apr2022 INTO + (PARTITION sales_feb2022 FOR VALUES FROM ('2022-02-01') TO ('2022-03-01'), + PARTITION sales_mar2022 FOR VALUES FROM ('2022-03-01') TO ('2022-04-01'), + PARTITION sales_apr2022 FOR VALUES FROM ('2022-04-01') TO ('2022-05-01')); +SET search_path = partition_split_schema, public; +-- \d+ sales_range + +DROP TABLE sales_range; +DROP TABLE sales_others; + +-- Additional tests for error messages, no default partition +CREATE TABLE sales_range (sales_date date) PARTITION BY RANGE (sales_date); +CREATE TABLE sales_jan2022 PARTITION OF sales_range FOR VALUES FROM ('2022-01-01') TO ('2022-02-01'); +CREATE TABLE sales_feb_mar_apr2022 PARTITION OF sales_range FOR VALUES FROM ('2022-02-01') TO ('2022-05-01'); + +-- ERROR: upper bound of partition "sales_apr2022" is not equal to upper bound of split partition "sales_feb_mar_apr2022" +-- HINT: ALTER TABLE ... SPLIT PARTITION require combined bounds of new partitions must exactly match the bound of the split partition +ALTER TABLE sales_range SPLIT PARTITION sales_feb_mar_apr2022 INTO + (PARTITION sales_feb2022 FOR VALUES FROM ('2022-02-01') TO ('2022-03-01'), + PARTITION sales_mar2022 FOR VALUES FROM ('2022-03-01') TO ('2022-04-01'), + PARTITION sales_apr2022 FOR VALUES FROM ('2022-04-01') TO ('2022-06-01')); + +DROP TABLE sales_range; + +-- +-- Add rows into partitioned table then split partition +-- +CREATE TABLE sales_range (salesperson_id INT, salesperson_name VARCHAR(30), sales_amount INT, sales_date DATE) PARTITION BY RANGE (sales_date); +CREATE TABLE sales_jan2022 PARTITION OF sales_range FOR VALUES FROM ('2022-01-01') TO ('2022-02-01'); +CREATE TABLE sales_feb_mar_apr2022 PARTITION OF sales_range FOR VALUES FROM ('2022-02-01') TO ('2022-05-01'); +CREATE TABLE sales_others PARTITION OF sales_range DEFAULT; + +INSERT INTO sales_range VALUES + (1, 'May', 1000, '2022-01-31'), + (2, 'Smirnoff', 500, '2022-02-10'), + (3, 'Ford', 2000, '2022-04-30'), + (4, 'Ivanov', 750, '2022-04-13'), + (5, 'Deev', 250, '2022-04-07'), + (6, 'Poirot', 150, '2022-02-11'), + (7, 'Li', 175, '2022-03-08'), + (8, 'Ericsson', 185, '2022-02-23'), + (9, 'Muller', 250, '2022-03-11'), + (10, 'Halder', 350, '2022-01-28'), + (11, 'Trump', 380, '2022-04-06'), + (12, 'Plato', 350, '2022-03-19'), + (13, 'Gandi', 377, '2022-01-09'), + (14, 'Smith', 510, '2022-05-04'); + +ALTER TABLE sales_range SPLIT PARTITION sales_feb_mar_apr2022 INTO + (PARTITION sales_feb2022 FOR VALUES FROM ('2022-02-01') TO ('2022-03-01'), + PARTITION sales_mar2022 FOR VALUES FROM ('2022-03-01') TO ('2022-04-01'), + PARTITION sales_apr2022 FOR VALUES FROM ('2022-04-01') TO ('2022-05-01')); + +SELECT tableoid::regclass, * FROM sales_range ORDER BY tableoid::regclass::text COLLATE "C", salesperson_id; +DROP TABLE sales_range CASCADE; + +-- +-- Add split partition, then add rows into partitioned table +-- +CREATE TABLE sales_range (salesperson_id INT, salesperson_name VARCHAR(30), sales_amount 
INT, sales_date DATE) PARTITION BY RANGE (sales_date); +CREATE TABLE sales_jan2022 PARTITION OF sales_range FOR VALUES FROM ('2022-01-01') TO ('2022-02-01'); +CREATE TABLE sales_feb_mar_apr2022 PARTITION OF sales_range FOR VALUES FROM ('2022-02-01') TO ('2022-05-01'); +CREATE TABLE sales_others PARTITION OF sales_range DEFAULT; + +INSERT INTO sales_range VALUES + (1, 'May', 1000, '2022-01-31'), + (2, 'Smirnoff', 500, '2022-02-10'), + (3, 'Ford', 2000, '2022-04-30'), + (4, 'Ivanov', 750, '2022-04-13'), + (5, 'Deev', 250, '2022-04-07'), + (6, 'Poirot', 150, '2022-02-11'), + (7, 'Li', 175, '2022-03-08'), + (8, 'Ericsson', 185, '2022-02-23'), + (9, 'Muller', 250, '2022-03-11'), + (10, 'Halder', 350, '2022-01-28'), + (11, 'Trump', 380, '2022-04-06'), + (12, 'Plato', 350, '2022-03-19'), + (13, 'Gandi', 377, '2022-01-09'), + (14, 'Smith', 510, '2022-05-04'); + +-- Split partition, also check schema qualification of new partitions +ALTER TABLE sales_range SPLIT PARTITION sales_feb_mar_apr2022 INTO + (PARTITION partition_split_schema.sales_feb2022 FOR VALUES FROM ('2022-02-01') TO ('2022-03-01'), + PARTITION partition_split_schema2.sales_mar2022 FOR VALUES FROM ('2022-03-01') TO ('2022-04-01'), + PARTITION sales_apr2022 FOR VALUES FROM ('2022-04-01') TO ('2022-05-01')); +-- \d+ sales_range + +SELECT tableoid::regclass, * FROM sales_range ORDER BY tableoid::regclass::text COLLATE "C", salesperson_id; + +DROP TABLE sales_range CASCADE; + +-- +-- Test for: +-- * composite partition key; +-- * GENERATED column; +-- * column with DEFAULT value. +-- +CREATE TABLE sales_date (salesperson_name VARCHAR(30), sales_year INT, sales_month INT, sales_day INT, + sales_date VARCHAR(10) GENERATED ALWAYS AS + (LPAD(sales_year::text, 4, '0') || '.' || LPAD(sales_month::text, 2, '0') || '.' 
|| LPAD(sales_day::text, 2, '0')) STORED, + sales_department VARCHAR(30) DEFAULT 'Sales department') + PARTITION BY RANGE (sales_year, sales_month, sales_day); + +CREATE TABLE sales_dec2021 PARTITION OF sales_date FOR VALUES FROM (2021, 12, 1) TO (2022, 1, 1); +CREATE TABLE sales_jan_feb2022 PARTITION OF sales_date FOR VALUES FROM (2022, 1, 1) TO (2022, 3, 1); +CREATE TABLE sales_other PARTITION OF sales_date FOR VALUES FROM (2022, 3, 1) TO (MAXVALUE, MAXVALUE, MAXVALUE); + +INSERT INTO sales_date(salesperson_name, sales_year, sales_month, sales_day) VALUES + ('Manager1', 2021, 12, 7), + ('Manager2', 2021, 12, 8), + ('Manager3', 2022, 1, 1), + ('Manager1', 2022, 2, 4), + ('Manager2', 2022, 1, 2), + ('Manager3', 2022, 2, 1), + ('Manager1', 2022, 3, 3), + ('Manager2', 2022, 3, 4), + ('Manager3', 2022, 5, 1); + +SELECT tableoid::regclass, * FROM sales_date ORDER BY tableoid::regclass::text COLLATE "C", sales_year, sales_month, sales_day; + +ALTER TABLE sales_date SPLIT PARTITION sales_jan_feb2022 INTO + (PARTITION sales_jan2022 FOR VALUES FROM (2022, 1, 1) TO (2022, 2, 1), + PARTITION sales_feb2022 FOR VALUES FROM (2022, 2, 1) TO (2022, 3, 1)); + +INSERT INTO sales_date(salesperson_name, sales_year, sales_month, sales_day) VALUES + ('Manager1', 2022, 1, 10), + ('Manager2', 2022, 2, 10); + +SELECT tableoid::regclass, * FROM sales_date ORDER BY tableoid::regclass::text COLLATE "C", sales_year, sales_month, sales_day; + +DROP TABLE sales_date CASCADE; + +-- +-- Test: split DEFAULT partition; use an index on partition key; check index after split +-- +CREATE TABLE sales_range (salesperson_id INT, salesperson_name VARCHAR(30), sales_amount INT, sales_date DATE) PARTITION BY RANGE (sales_date); +CREATE TABLE sales_jan2022 PARTITION OF sales_range FOR VALUES FROM ('2022-01-01') TO ('2022-02-01'); +CREATE TABLE sales_others PARTITION OF sales_range DEFAULT; +CREATE INDEX sales_range_sales_date_idx ON sales_range USING btree (sales_date); + +INSERT INTO sales_range VALUES + (1, 'May', 1000, '2022-01-31'), + (2, 'Smirnoff', 500, '2022-02-10'), + (3, 'Ford', 2000, '2022-04-30'), + (4, 'Ivanov', 750, '2022-04-13'), + (5, 'Deev', 250, '2022-04-07'), + (6, 'Poirot', 150, '2022-02-11'), + (7, 'Li', 175, '2022-03-08'), + (8, 'Ericsson', 185, '2022-02-23'), + (9, 'Muller', 250, '2022-03-11'), + (10, 'Halder', 350, '2022-01-28'), + (11, 'Trump', 380, '2022-04-06'), + (12, 'Plato', 350, '2022-03-19'), + (13, 'Gandi', 377, '2022-01-09'), + (14, 'Smith', 510, '2022-05-04'); + +SELECT * FROM sales_others; +SELECT * FROM pg_indexes WHERE tablename = 'sales_others' and schemaname = 'partition_split_schema' ORDER BY indexname COLLATE "C"; + +ALTER TABLE sales_range SPLIT PARTITION sales_others INTO + (PARTITION sales_feb2022 FOR VALUES FROM ('2022-02-01') TO ('2022-03-01'), + PARTITION sales_mar2022 FOR VALUES FROM ('2022-03-01') TO ('2022-04-01'), + PARTITION sales_apr2022 FOR VALUES FROM ('2022-04-01') TO ('2022-05-01'), + PARTITION sales_others DEFAULT); + +-- Use indexscan for testing indexes +SET enable_seqscan = OFF; + +EXPLAIN (COSTS OFF) SELECT * FROM sales_feb2022 where sales_date > '2022-01-01'; +SELECT * FROM sales_feb2022 where sales_date > '2022-01-01'; +EXPLAIN (COSTS OFF) SELECT * FROM sales_mar2022 where sales_date > '2022-01-01'; +SELECT * FROM sales_mar2022 where sales_date > '2022-01-01'; +EXPLAIN (COSTS OFF) SELECT * FROM sales_apr2022 where sales_date > '2022-01-01'; +SELECT * FROM sales_apr2022 where sales_date > '2022-01-01'; +EXPLAIN (COSTS OFF) SELECT * FROM sales_others where sales_date > 
'2022-01-01'; +SELECT * FROM sales_others where sales_date > '2022-01-01'; + +RESET enable_seqscan; + +SELECT * FROM pg_indexes +WHERE tablename in ('sales_feb2022', 'sales_mar2022', 'sales_apr2022', 'sales_others') +AND schemaname = 'partition_split_schema' +ORDER BY indexname COLLATE "C"; + +DROP TABLE sales_range CASCADE; + +-- +-- Test: some cases for splitting DEFAULT partition (different bounds) +-- +CREATE TABLE sales_range (salesperson_id INT, sales_date date) PARTITION BY RANGE (sales_date); +CREATE TABLE sales_others PARTITION OF sales_range DEFAULT; + +-- sales_error intersects with sales_dec2021 (lower bound) +-- ERROR: can not split to partition "sales_error" together with partition "sales_dec2021" +ALTER TABLE sales_range SPLIT PARTITION sales_others INTO + (PARTITION sales_dec2021 FOR VALUES FROM ('2021-12-01') TO ('2022-01-01'), + PARTITION sales_error FOR VALUES FROM ('2021-12-30') TO ('2022-02-01'), + PARTITION sales_feb2022 FOR VALUES FROM ('2022-02-01') TO ('2022-03-01'), + PARTITION sales_others DEFAULT); + +-- sales_error intersects with sales_feb2022 (upper bound) +-- ERROR: can not split to partition "sales_feb2022" together with partition "sales_error" +ALTER TABLE sales_range SPLIT PARTITION sales_others INTO + (PARTITION sales_dec2021 FOR VALUES FROM ('2021-12-01') TO ('2022-01-01'), + PARTITION sales_error FOR VALUES FROM ('2022-01-01') TO ('2022-02-02'), + PARTITION sales_feb2022 FOR VALUES FROM ('2022-02-01') TO ('2022-03-01'), + PARTITION sales_others DEFAULT); + +-- sales_error intersects with sales_dec2021 (inside bound) +-- ERROR: can not split to partition "sales_error" together with partition "sales_dec2021" +ALTER TABLE sales_range SPLIT PARTITION sales_others INTO + (PARTITION sales_dec2021 FOR VALUES FROM ('2021-12-01') TO ('2022-01-01'), + PARTITION sales_error FOR VALUES FROM ('2021-12-10') TO ('2021-12-20'), + PARTITION sales_feb2022 FOR VALUES FROM ('2022-02-01') TO ('2022-03-01'), + PARTITION sales_others DEFAULT); + +-- sales_error intersects with sales_dec2021 (exactly the same bounds) +-- ERROR: can not split to partition "sales_error" together with partition "sales_dec2021" +ALTER TABLE sales_range SPLIT PARTITION sales_others INTO + (PARTITION sales_dec2021 FOR VALUES FROM ('2021-12-01') TO ('2022-01-01'), + PARTITION sales_error FOR VALUES FROM ('2021-12-01') TO ('2022-01-01'), + PARTITION sales_feb2022 FOR VALUES FROM ('2022-02-01') TO ('2022-03-01'), + PARTITION sales_others DEFAULT); + +-- ERROR: can not split DEFAULT partition "sales_others" +-- HINT: To split DEFAULT partition one of the new partitions must be DEFAULT +ALTER TABLE sales_range SPLIT PARTITION sales_others INTO + (PARTITION sales_dec2021 FOR VALUES FROM ('2021-12-01') TO ('2022-01-01'), + PARTITION sales_jan2022 FOR VALUES FROM ('2022-01-01') TO ('2022-02-01'), + PARTITION sales_feb2022 FOR VALUES FROM ('2022-02-01') TO ('2022-03-01')); + +-- no error: bounds of sales_noerror are between sales_dec2021 and sales_feb2022 +ALTER TABLE sales_range SPLIT PARTITION sales_others INTO + (PARTITION sales_dec2021 FOR VALUES FROM ('2021-12-01') TO ('2022-01-01'), + PARTITION sales_noerror FOR VALUES FROM ('2022-01-10') TO ('2022-01-20'), + PARTITION sales_feb2022 FOR VALUES FROM ('2022-02-01') TO ('2022-03-01'), + PARTITION sales_others DEFAULT); + +DROP TABLE sales_range; + +CREATE TABLE sales_range (sales_date date) PARTITION BY RANGE (sales_date); +CREATE TABLE sales_others PARTITION OF sales_range DEFAULT; + +-- no error: bounds of sales_noerror are equal to lower and upper 
bounds of sales_dec2021 and sales_feb2022 +ALTER TABLE sales_range SPLIT PARTITION sales_others INTO + (PARTITION sales_dec2021 FOR VALUES FROM ('2021-12-01') TO ('2022-01-01'), + PARTITION sales_noerror FOR VALUES FROM ('2022-01-01') TO ('2022-02-01'), + PARTITION sales_feb2022 FOR VALUES FROM ('2022-02-01') TO ('2022-03-01'), + PARTITION sales_others DEFAULT); + +DROP TABLE sales_range; + +-- +-- Test: split partition with CHECK and FOREIGN KEY CONSTRAINTs on partitioned table +-- +CREATE TABLE salespeople(salesperson_id INT PRIMARY KEY, salesperson_name VARCHAR(30)); +INSERT INTO salespeople VALUES (1, 'Poirot'); + +CREATE TABLE sales_range ( +salesperson_id INT REFERENCES salespeople(salesperson_id), +sales_amount INT CHECK (sales_amount > 1), +sales_date DATE) PARTITION BY RANGE (sales_date); + +CREATE TABLE sales_jan2022 PARTITION OF sales_range FOR VALUES FROM ('2022-01-01') TO ('2022-02-01'); +CREATE TABLE sales_feb_mar_apr2022 PARTITION OF sales_range FOR VALUES FROM ('2022-02-01') TO ('2022-05-01'); +CREATE TABLE sales_others PARTITION OF sales_range DEFAULT; + +SELECT pg_get_constraintdef(oid), conname, conkey FROM pg_constraint WHERE conrelid = 'sales_feb_mar_apr2022'::regclass::oid ORDER BY conname COLLATE "C"; + +ALTER TABLE sales_range SPLIT PARTITION sales_feb_mar_apr2022 INTO + (PARTITION sales_feb2022 FOR VALUES FROM ('2022-02-01') TO ('2022-03-01'), + PARTITION sales_mar2022 FOR VALUES FROM ('2022-03-01') TO ('2022-04-01'), + PARTITION sales_apr2022 FOR VALUES FROM ('2022-04-01') TO ('2022-05-01')); + +-- We should see the same CONSTRAINTs as on sales_feb_mar_apr2022 partition +SELECT pg_get_constraintdef(oid), conname, conkey FROM pg_constraint WHERE conrelid = 'sales_feb2022'::regclass::oid ORDER BY conname COLLATE "C"; +SELECT pg_get_constraintdef(oid), conname, conkey FROM pg_constraint WHERE conrelid = 'sales_mar2022'::regclass::oid ORDER BY conname COLLATE "C"; +SELECT pg_get_constraintdef(oid), conname, conkey FROM pg_constraint WHERE conrelid = 'sales_apr2022'::regclass::oid ORDER BY conname COLLATE "C"; + +-- ERROR: new row for relation "sales_mar2022" violates check constraint "sales_range_sales_amount_check" +INSERT INTO sales_range VALUES (1, 0, '2022-03-11'); +-- ERROR: insert or update on table "sales_mar2022" violates foreign key constraint "sales_range_salesperson_id_fkey" +INSERT INTO sales_range VALUES (-1, 10, '2022-03-11'); +-- ok +INSERT INTO sales_range VALUES (1, 10, '2022-03-11'); + +DROP TABLE sales_range CASCADE; +DROP TABLE salespeople CASCADE; + +-- +-- Test: split partition on partitioned table in case of existing FOREIGN KEY reference from another table +-- +CREATE TABLE salespeople(salesperson_id INT PRIMARY KEY, salesperson_name VARCHAR(30)) PARTITION BY RANGE (salesperson_id); +CREATE TABLE sales (salesperson_id INT REFERENCES salespeople(salesperson_id), sales_amount INT, sales_date DATE); + +CREATE TABLE salespeople01_10 PARTITION OF salespeople FOR VALUES FROM (1) TO (10); +CREATE TABLE salespeople10_40 PARTITION OF salespeople FOR VALUES FROM (10) TO (40); + +INSERT INTO salespeople VALUES + (1, 'Poirot'), + (10, 'May'), + (19, 'Ivanov'), + (20, 'Smirnoff'), + (30, 'Ford'); + +INSERT INTO sales VALUES + (1, 100, '2022-03-01'), + (1, 110, '2022-03-02'), + (10, 150, '2022-03-01'), + (10, 90, '2022-03-03'), + (19, 200, '2022-03-04'), + (20, 50, '2022-03-12'), + (20, 170, '2022-03-02'), + (30, 30, '2022-03-04'); + +SELECT tableoid::regclass, * FROM salespeople ORDER BY tableoid::regclass::text COLLATE "C", salesperson_id; + +ALTER TABLE 
salespeople SPLIT PARTITION salespeople10_40 INTO + (PARTITION salespeople10_20 FOR VALUES FROM (10) TO (20), + PARTITION salespeople20_30 FOR VALUES FROM (20) TO (30), + PARTITION salespeople30_40 FOR VALUES FROM (30) TO (40)); + +SELECT tableoid::regclass, * FROM salespeople ORDER BY tableoid::regclass::text COLLATE "C", salesperson_id; + +-- ERROR: insert or update on table "sales" violates foreign key constraint "sales_salesperson_id_fkey" +INSERT INTO sales VALUES (40, 50, '2022-03-04'); +-- ok +INSERT INTO sales VALUES (30, 50, '2022-03-04'); + +DROP TABLE sales CASCADE; +DROP TABLE salespeople CASCADE; + +-- +-- Test: split partition of partitioned table with triggers +-- +CREATE TABLE salespeople(salesperson_id INT PRIMARY KEY, salesperson_name VARCHAR(30)) PARTITION BY RANGE (salesperson_id); + +CREATE TABLE salespeople01_10 PARTITION OF salespeople FOR VALUES FROM (1) TO (10); +CREATE TABLE salespeople10_40 PARTITION OF salespeople FOR VALUES FROM (10) TO (40); + +INSERT INTO salespeople VALUES (1, 'Poirot'); + +CREATE OR REPLACE FUNCTION after_insert_row_trigger() RETURNS trigger LANGUAGE 'plpgsql' AS $BODY$ +BEGIN + RAISE NOTICE 'trigger(%) called: action = %, when = %, level = %', TG_ARGV[0], TG_OP, TG_WHEN, TG_LEVEL; + RETURN NULL; +END; +$BODY$; + +CREATE TRIGGER salespeople_after_insert_statement_trigger + AFTER INSERT + ON salespeople + FOR EACH STATEMENT + EXECUTE PROCEDURE after_insert_row_trigger('salespeople'); + +CREATE TRIGGER salespeople_after_insert_row_trigger + AFTER INSERT + ON salespeople + FOR EACH ROW + EXECUTE PROCEDURE after_insert_row_trigger('salespeople'); + +-- 2 triggers should fire here (row + statement): +INSERT INTO salespeople VALUES (10, 'May'); +-- 1 trigger should fire here (row): +INSERT INTO salespeople10_40 VALUES (19, 'Ivanov'); + +ALTER TABLE salespeople SPLIT PARTITION salespeople10_40 INTO + (PARTITION salespeople10_20 FOR VALUES FROM (10) TO (20), + PARTITION salespeople20_30 FOR VALUES FROM (20) TO (30), + PARTITION salespeople30_40 FOR VALUES FROM (30) TO (40)); + +-- 2 triggers should fire here (row + statement): +INSERT INTO salespeople VALUES (20, 'Smirnoff'); +-- 1 trigger should fire here (row): +INSERT INTO salespeople30_40 VALUES (30, 'Ford'); + +SELECT tableoid::regclass, * FROM salespeople ORDER BY tableoid::regclass::text COLLATE "C", salesperson_id; + +DROP TABLE salespeople CASCADE; +DROP FUNCTION after_insert_row_trigger(); + +-- +-- Test: split partition with identity column +-- If the split partition's column is an identity column, the corresponding columns of the new partitions are identity columns too.
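+-- (Illustrative note: in pg_attribute, attidentity is 'a' for GENERATED ALWAYS and +-- 'd' for GENERATED BY DEFAULT; the catalog query further below inspects that flag +-- for the partitions produced by the split.)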
+-- +CREATE TABLE salespeople(salesperson_id INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY, salesperson_name VARCHAR(30)) PARTITION BY RANGE (salesperson_id); + +CREATE TABLE salespeople1_2 PARTITION OF salespeople FOR VALUES FROM (1) TO (2); +-- Create a new partition (on ATTACH its salesperson_id column should become an identity column): +CREATE TABLE salespeople2_5(salesperson_id INT NOT NULL, salesperson_name VARCHAR(30)); +ALTER TABLE salespeople ATTACH PARTITION salespeople2_5 FOR VALUES FROM (2) TO (5); + +INSERT INTO salespeople (salesperson_name) VALUES ('Poirot'), ('Ivanov'); + +ALTER TABLE salespeople SPLIT PARTITION salespeople2_5 INTO + (PARTITION salespeople2_3 FOR VALUES FROM (2) TO (3), + PARTITION salespeople3_4 FOR VALUES FROM (3) TO (4), + PARTITION salespeople4_5 FOR VALUES FROM (4) TO (5)); + +INSERT INTO salespeople (salesperson_name) VALUES ('May'), ('Ford'); + +SELECT tableoid::regclass, * FROM salespeople ORDER BY tableoid::regclass::text COLLATE "C", salesperson_id; + +-- Check whether the new partitions have identity columns after the split: +SELECT attrelid::regclass, attname, attidentity, attgenerated FROM pg_attribute +WHERE attnum > 0 +AND attrelid::regclass IN ( + 'salespeople2_3'::regclass, 'salespeople', 'salespeople2_3', + 'salespeople1_2', 'salespeople3_4', 'salespeople4_5') +ORDER BY attrelid::regclass::text COLLATE "C", attnum; + +DROP TABLE salespeople CASCADE; + +-- +-- Test: split partition with deleted columns +-- +CREATE TABLE salespeople(salesperson_id INT PRIMARY KEY, salesperson_name VARCHAR(30)) PARTITION BY RANGE (salesperson_id); + +CREATE TABLE salespeople01_10 PARTITION OF salespeople FOR VALUES FROM (1) TO (10); +-- Create new partition with some deleted columns: +CREATE TABLE salespeople10_40(d1 VARCHAR(30), salesperson_id INT PRIMARY KEY, d2 INT, d3 DATE, salesperson_name VARCHAR(30)); + +INSERT INTO salespeople10_40 VALUES + ('dummy value 1', 19, 100, now(), 'Ivanov'), + ('dummy value 2', 20, 101, now(), 'Smirnoff'); + +ALTER TABLE salespeople10_40 DROP COLUMN d1; +ALTER TABLE salespeople10_40 DROP COLUMN d2; +ALTER TABLE salespeople10_40 DROP COLUMN d3; + +ALTER TABLE salespeople ATTACH PARTITION salespeople10_40 FOR VALUES FROM (10) TO (40); + +INSERT INTO salespeople VALUES + (1, 'Poirot'), + (10, 'May'), + (30, 'Ford'); + +ALTER TABLE salespeople SPLIT PARTITION salespeople10_40 INTO + (PARTITION salespeople10_20 FOR VALUES FROM (10) TO (20), + PARTITION salespeople20_30 FOR VALUES FROM (20) TO (30), + PARTITION salespeople30_40 FOR VALUES FROM (30) TO (40)); + +SELECT tableoid::regclass, * FROM salespeople ORDER BY tableoid::regclass::text COLLATE "C", salesperson_id; + +DROP TABLE salespeople CASCADE; + +-- +-- Test: split sub-partition +-- +CREATE TABLE sales_range (salesperson_id INT, salesperson_name VARCHAR(30), sales_amount INT, sales_date DATE) PARTITION BY RANGE (sales_date); +CREATE TABLE sales_jan2022 PARTITION OF sales_range FOR VALUES FROM ('2022-01-01') TO ('2022-02-01'); +CREATE TABLE sales_feb2022 PARTITION OF sales_range FOR VALUES FROM ('2022-02-01') TO ('2022-03-01'); +CREATE TABLE sales_mar2022 PARTITION OF sales_range FOR VALUES FROM ('2022-03-01') TO ('2022-04-01'); + +CREATE TABLE sales_apr2022 (salesperson_id INT, salesperson_name VARCHAR(30), sales_amount INT, sales_date DATE) PARTITION BY RANGE (sales_date); +CREATE TABLE sales_apr_all PARTITION OF sales_apr2022 FOR VALUES FROM ('2022-04-01') TO ('2022-05-01'); +ALTER TABLE sales_range ATTACH PARTITION sales_apr2022 FOR VALUES FROM ('2022-04-01') TO ('2022-05-01'); + +CREATE TABLE sales_others PARTITION OF 
sales_range DEFAULT; + +CREATE INDEX sales_range_sales_date_idx ON sales_range USING btree (sales_date); + +INSERT INTO sales_range VALUES + (1, 'May', 1000, '2022-01-31'), + (2, 'Smirnoff', 500, '2022-02-10'), + (3, 'Ford', 2000, '2022-04-30'), + (4, 'Ivanov', 750, '2022-04-13'), + (5, 'Deev', 250, '2022-04-07'), + (6, 'Poirot', 150, '2022-02-11'), + (7, 'Li', 175, '2022-03-08'), + (8, 'Ericsson', 185, '2022-02-23'), + (9, 'Muller', 250, '2022-03-11'), + (10, 'Halder', 350, '2022-01-28'), + (11, 'Trump', 380, '2022-04-06'), + (12, 'Plato', 350, '2022-03-19'), + (13, 'Gandi', 377, '2022-01-09'), + (14, 'Smith', 510, '2022-05-04'); + +SELECT tableoid::regclass, * FROM sales_range ORDER BY tableoid::regclass::text COLLATE "C", salesperson_id; + +ALTER TABLE sales_apr2022 SPLIT PARTITION sales_apr_all INTO + (PARTITION sales_apr2022_01_10 FOR VALUES FROM ('2022-04-01') TO ('2022-04-10'), + PARTITION sales_apr2022_10_20 FOR VALUES FROM ('2022-04-10') TO ('2022-04-20'), + PARTITION sales_apr2022_20_30 FOR VALUES FROM ('2022-04-20') TO ('2022-05-01')); + +SELECT tableoid::regclass, * FROM sales_range ORDER BY tableoid::regclass::text COLLATE "C", salesperson_id; + +DROP TABLE sales_range; + +-- +-- BY LIST partitioning +-- + +-- +-- Test: specific errors for BY LIST partitioning +-- +CREATE TABLE sales_list (sales_state VARCHAR(20)) PARTITION BY LIST (sales_state); + +CREATE TABLE sales_nord PARTITION OF sales_list FOR VALUES IN ('Oslo', 'St. Petersburg', 'Helsinki'); +CREATE TABLE sales_all PARTITION OF sales_list FOR VALUES IN ('Warsaw', 'Lisbon', 'New York', 'Madrid', 'Bejing', 'Berlin', 'Delhi', 'Kyiv', 'Vladivostok'); +CREATE TABLE sales_others PARTITION OF sales_list DEFAULT; + +-- ERROR: new partition "sales_east" would overlap with another (not split) partition "sales_nord" +ALTER TABLE sales_list SPLIT PARTITION sales_all INTO + (PARTITION sales_west FOR VALUES IN ('Lisbon', 'New York', 'Madrid'), + PARTITION sales_east FOR VALUES IN ('Bejing', 'Delhi', 'Vladivostok', 'Helsinki'), + PARTITION sales_central FOR VALUES IN ('Warsaw', 'Berlin', 'Kyiv')); + +-- ERROR: new partition "sales_west" would overlap with another new partition "sales_central" +ALTER TABLE sales_list SPLIT PARTITION sales_all INTO + (PARTITION sales_west FOR VALUES IN ('Lisbon', 'New York', 'Madrid'), + PARTITION sales_east FOR VALUES IN ('Bejing', 'Delhi', 'Vladivostok'), + PARTITION sales_central FOR VALUES IN ('Warsaw', 'Berlin', 'Lisbon', 'Kyiv')); + +-- ERROR: new partition "sales_west" cannot have NULL value because split partition "sales_all" does not have +ALTER TABLE sales_list SPLIT PARTITION sales_all INTO + (PARTITION sales_west FOR VALUES IN ('Lisbon', 'New York', 'Madrid', NULL), + PARTITION sales_east FOR VALUES IN ('Bejing', 'Delhi', 'Vladivostok'), + PARTITION sales_central FOR VALUES IN ('Warsaw', 'Berlin', 'Kyiv')); + +-- ERROR: new partition "sales_west" cannot have this value because split partition "sales_all" does not have +ALTER TABLE sales_list SPLIT PARTITION sales_all INTO + (PARTITION sales_west FOR VALUES IN ('Lisbon', 'New York', 'Madrid', 'Melbourne'), + PARTITION sales_east FOR VALUES IN ('Bejing', 'Delhi', 'Vladivostok'), + PARTITION sales_central FOR VALUES IN ('Warsaw', 'Berlin', 'Kyiv')); + +-- ERROR: new partition cannot be DEFAULT because DEFAULT partition "sales_others" already exists +ALTER TABLE sales_list SPLIT PARTITION sales_all INTO + (PARTITION sales_west FOR VALUES IN ('Lisbon', 'New York', 'Madrid', 'Melbourne'), + PARTITION sales_east FOR VALUES IN ('Bejing', 'Delhi', 
'Vladivostok'), + PARTITION sales_central FOR VALUES IN ('Warsaw', 'Berlin', 'Kyiv'), + PARTITION sales_others2 DEFAULT); + +DROP TABLE sales_list; + +-- Test for non-symbolic comparison of values (numeric values '0' and '0.0' are equal). +CREATE TABLE t (a numeric) PARTITION BY LIST (a); +CREATE TABLE t1 PARTITION OF t FOR VALUES in ('0', '1'); +-- ERROR: new partition "x" would overlap with another new partition "x1" +ALTER TABLE t SPLIT PARTITION t1 INTO + (PARTITION x FOR VALUES IN ('0'), + PARTITION x1 FOR VALUES IN ('0.0', '1')); +DROP TABLE t; + +-- +-- Test: two specific errors for BY LIST partitioning: +-- * new partitions do not have the NULL value, which the split partition has. +-- * new partitions do not have a value that the split partition has. +-- +CREATE TABLE sales_list(sales_state VARCHAR(20)) PARTITION BY LIST (sales_state); + +CREATE TABLE sales_nord PARTITION OF sales_list FOR VALUES IN ('Helsinki', 'St. Petersburg', 'Oslo'); +CREATE TABLE sales_all PARTITION OF sales_list FOR VALUES IN ('Warsaw', 'Lisbon', 'New York', 'Madrid', 'Bejing', 'Berlin', 'Delhi', 'Kyiv', 'Vladivostok', NULL); + +-- ERROR: new partitions combined partition bounds do not contain value (NULL) but split partition "sales_all" does +-- HINT: ALTER TABLE ... SPLIT PARTITION require combined bounds of new partitions must exactly match the bound of the split partition +ALTER TABLE sales_list SPLIT PARTITION sales_all INTO + (PARTITION sales_west FOR VALUES IN ('Lisbon', 'New York', 'Madrid'), + PARTITION sales_east FOR VALUES IN ('Bejing', 'Delhi', 'Vladivostok'), + PARTITION sales_central FOR VALUES IN ('Warsaw', 'Berlin', 'Kyiv')); + +-- ERROR: new partitions combined partition bounds do not contain value ('Kyiv'::character varying(20)) but split partition "sales_all" does +-- HINT: ALTER TABLE ... SPLIT PARTITION require combined bounds of new partitions must exactly match the bound of the split partition +ALTER TABLE sales_list SPLIT PARTITION sales_all INTO + (PARTITION sales_west FOR VALUES IN ('Lisbon', 'New York', 'Madrid'), + PARTITION sales_east FOR VALUES IN ('Bejing', 'Delhi', 'Vladivostok'), + PARTITION sales_central FOR VALUES IN ('Warsaw', 'Berlin', NULL)); + +-- ERROR: DEFAULT partition should be one +ALTER TABLE sales_list SPLIT PARTITION sales_all INTO + (PARTITION sales_west FOR VALUES IN ('Lisbon', 'New York', 'Madrid'), + PARTITION sales_east FOR VALUES IN ('Bejing', 'Delhi', 'Vladivostok'), + PARTITION sales_central FOR VALUES IN ('Warsaw', 'Berlin', 'Kyiv'), + PARTITION sales_others DEFAULT, + PARTITION sales_others2 DEFAULT); + +DROP TABLE sales_list; + +-- +-- Test: BY LIST partitioning, SPLIT PARTITION with data +-- +CREATE TABLE sales_list +(salesperson_id SERIAL, + salesperson_name VARCHAR(30), + sales_state VARCHAR(20), + sales_amount INT, + sales_date DATE) +PARTITION BY LIST (sales_state); + +CREATE INDEX sales_list_salesperson_name_idx ON sales_list USING btree (salesperson_name); +CREATE INDEX sales_list_sales_state_idx ON sales_list USING btree (sales_state); + +CREATE TABLE sales_nord PARTITION OF sales_list FOR VALUES IN ('Helsinki', 'St. Petersburg', 'Oslo'); +CREATE TABLE sales_all PARTITION OF sales_list FOR VALUES IN ('Warsaw', 'Lisbon', 'New York', 'Madrid', 'Bejing', 'Berlin', 'Delhi', 'Kyiv', 'Vladivostok'); +CREATE TABLE sales_others PARTITION OF sales_list DEFAULT; + +INSERT INTO sales_list (salesperson_name, sales_state, sales_amount, sales_date) VALUES + ('Trump', 'Bejing', 1000, '2022-03-01'), + ('Smirnoff', 'New York', 500, '2022-03-03'), + ('Ford', 'St. 
Petersburg', 2000, '2022-03-05'), + ('Ivanov', 'Warsaw', 750, '2022-03-04'), + ('Deev', 'Lisbon', 250, '2022-03-07'), + ('Poirot', 'Berlin', 1000, '2022-03-01'), + ('May', 'Oslo', 1200, '2022-03-06'), + ('Li', 'Vladivostok', 1150, '2022-03-09'), + ('May', 'Oslo', 1200, '2022-03-11'), + ('Halder', 'Helsinki', 800, '2022-03-02'), + ('Muller', 'Madrid', 650, '2022-03-05'), + ('Smith', 'Kyiv', 350, '2022-03-10'), + ('Gandi', 'Warsaw', 150, '2022-03-08'), + ('Plato', 'Lisbon', 950, '2022-03-05'); + +ALTER TABLE sales_list SPLIT PARTITION sales_all INTO + (PARTITION sales_west FOR VALUES IN ('Lisbon', 'New York', 'Madrid'), + PARTITION sales_east FOR VALUES IN ('Bejing', 'Delhi', 'Vladivostok'), + PARTITION sales_central FOR VALUES IN ('Warsaw', 'Berlin', 'Kyiv')); + +SELECT tableoid::regclass, * FROM sales_list ORDER BY tableoid::regclass::text COLLATE "C", salesperson_id; + +-- Use indexscan for testing indexes after splitting partition +SET enable_seqscan = OFF; + +EXPLAIN (COSTS OFF) SELECT * FROM sales_central WHERE sales_state = 'Warsaw'; +SELECT * FROM sales_central WHERE sales_state = 'Warsaw'; +EXPLAIN (COSTS OFF) SELECT * FROM sales_list WHERE sales_state = 'Warsaw'; +SELECT * FROM sales_list WHERE sales_state = 'Warsaw'; +EXPLAIN (COSTS OFF) SELECT * FROM sales_list WHERE salesperson_name = 'Ivanov'; +SELECT * FROM sales_list WHERE salesperson_name = 'Ivanov'; + +RESET enable_seqscan; + +DROP TABLE sales_list; + +-- +-- Test for: +-- * split DEFAULT partition to partitions with spaces between bounds; +-- * random order of partitions in SPLIT PARTITION command. +-- +CREATE TABLE sales_range (salesperson_id INT, salesperson_name VARCHAR(30), sales_amount INT, sales_date DATE) PARTITION BY RANGE (sales_date); +CREATE TABLE sales_others PARTITION OF sales_range DEFAULT; + +INSERT INTO sales_range VALUES + (1, 'May', 1000, '2022-01-31'), + (2, 'Smirnoff', 500, '2022-02-09'), + (3, 'Ford', 2000, '2022-04-30'), + (4, 'Ivanov', 750, '2022-04-13'), + (5, 'Deev', 250, '2022-04-07'), + (6, 'Poirot', 150, '2022-02-07'), + (7, 'Li', 175, '2022-03-08'), + (8, 'Ericsson', 185, '2022-02-23'), + (9, 'Muller', 250, '2022-03-11'), + (10, 'Halder', 350, '2022-01-28'), + (11, 'Trump', 380, '2022-04-06'), + (12, 'Plato', 350, '2022-03-19'), + (13, 'Gandi', 377, '2022-01-09'), + (14, 'Smith', 510, '2022-05-04'); + +ALTER TABLE sales_range SPLIT PARTITION sales_others INTO + (PARTITION sales_others DEFAULT, + PARTITION sales_mar2022_1decade FOR VALUES FROM ('2022-03-01') TO ('2022-03-10'), + PARTITION sales_jan2022_1decade FOR VALUES FROM ('2022-01-01') TO ('2022-01-10'), + PARTITION sales_feb2022_1decade FOR VALUES FROM ('2022-02-01') TO ('2022-02-10'), + PARTITION sales_apr2022_1decade FOR VALUES FROM ('2022-04-01') TO ('2022-04-10')); + +SELECT tableoid::regclass, * FROM sales_range ORDER BY tableoid::regclass::text COLLATE "C", salesperson_id; + +DROP TABLE sales_range; + +-- +-- Test for: +-- * split non-DEFAULT partition to partitions with spaces between bounds; +-- * random order of partitions in SPLIT PARTITION command. 
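+-- (Illustrative note: rows whose sales_date falls into the gaps between the new +-- first-decade bounds should land in the DEFAULT partition sales_others; a quick +-- hypothetical check would be: SELECT tableoid::regclass, * FROM sales_others;)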
+-- +CREATE TABLE sales_range (salesperson_id INT, salesperson_name VARCHAR(30), sales_amount INT, sales_date DATE) PARTITION BY RANGE (sales_date); +CREATE TABLE sales_all PARTITION OF sales_range FOR VALUES FROM ('2022-01-01') TO ('2022-05-01'); + +INSERT INTO sales_range VALUES + (1, 'May', 1000, '2022-01-31'), + (2, 'Smirnoff', 500, '2022-02-09'), + (3, 'Ford', 2000, '2022-04-30'), + (4, 'Ivanov', 750, '2022-04-13'), + (5, 'Deev', 250, '2022-04-07'), + (6, 'Poirot', 150, '2022-02-07'), + (7, 'Li', 175, '2022-03-08'), + (8, 'Ericsson', 185, '2022-02-23'), + (9, 'Muller', 250, '2022-03-11'), + (10, 'Halder', 350, '2022-01-28'), + (11, 'Trump', 380, '2022-04-06'), + (12, 'Plato', 350, '2022-03-19'), + (13, 'Gandi', 377, '2022-01-09'); + +ALTER TABLE sales_range SPLIT PARTITION sales_all INTO + (PARTITION sales_mar2022_1decade FOR VALUES FROM ('2022-03-01') TO ('2022-03-10'), + PARTITION sales_jan2022_1decade FOR VALUES FROM ('2022-01-01') TO ('2022-01-10'), + PARTITION sales_feb2022_1decade FOR VALUES FROM ('2022-02-01') TO ('2022-02-10'), + PARTITION sales_apr2022_1decade FOR VALUES FROM ('2022-04-01') TO ('2022-04-10'), + PARTITION sales_others DEFAULT); + +SELECT tableoid::regclass, * FROM sales_range ORDER BY tableoid::regclass::text COLLATE "C", salesperson_id; + +DROP TABLE sales_range; + +-- +-- Test for splitting a non-DEFAULT partition into a DEFAULT partition plus +-- partitions with gaps between bounds. +-- +CREATE TABLE sales_range (salesperson_id INT, salesperson_name VARCHAR(30), sales_amount INT, sales_date DATE) PARTITION BY RANGE (sales_date); +CREATE TABLE sales_jan2022 PARTITION OF sales_range FOR VALUES FROM ('2022-01-01') TO ('2022-02-01'); +CREATE TABLE sales_all PARTITION OF sales_range FOR VALUES FROM ('2022-02-01') TO ('2022-05-01'); + +INSERT INTO sales_range VALUES + (1, 'May', 1000, '2022-01-31'), + (2, 'Smirnoff', 500, '2022-02-10'), + (3, 'Ford', 2000, '2022-04-30'), + (4, 'Ivanov', 750, '2022-04-13'), + (5, 'Deev', 250, '2022-04-07'), + (6, 'Poirot', 150, '2022-02-11'), + (7, 'Li', 175, '2022-03-08'), + (8, 'Ericsson', 185, '2022-02-23'), + (9, 'Muller', 250, '2022-03-11'), + (10, 'Halder', 350, '2022-01-28'), + (11, 'Trump', 380, '2022-04-06'), + (12, 'Plato', 350, '2022-03-19'), + (13, 'Gandi', 377, '2022-01-09'); + +ALTER TABLE sales_range SPLIT PARTITION sales_all INTO + (PARTITION sales_apr2022 FOR VALUES FROM ('2022-04-01') TO ('2022-05-01'), + PARTITION sales_feb2022 FOR VALUES FROM ('2022-02-01') TO ('2022-03-01'), + PARTITION sales_others DEFAULT); + +INSERT INTO sales_range VALUES (14, 'Smith', 510, '2022-05-04'); + +SELECT tableoid::regclass, * FROM sales_range ORDER BY tableoid::regclass::text COLLATE "C", salesperson_id; + +DROP TABLE sales_range; + +-- +-- Try to SPLIT a partition of another table. +-- +CREATE TABLE t1(i int, t text) PARTITION BY LIST (t); +CREATE TABLE t1pa PARTITION OF t1 FOR VALUES IN ('A'); +CREATE TABLE t2 (i int, t text) PARTITION BY RANGE (t); + +-- ERROR: relation "t1pa" is not a partition of relation "t2" +ALTER TABLE t2 SPLIT PARTITION t1pa INTO + (PARTITION t2a FOR VALUES FROM ('A') TO ('B'), + PARTITION t2b FOR VALUES FROM ('B') TO ('C')); + +DROP TABLE t2; +DROP TABLE t1; + +-- +-- Try to SPLIT a partition of a temporary table. 
+-- +CREATE TEMP TABLE t (i int) PARTITION BY RANGE (i); +CREATE TEMP TABLE tp_0_2 PARTITION OF t FOR VALUES FROM (0) TO (2); + +SELECT c.oid::pg_catalog.regclass, pg_catalog.pg_get_expr(c.relpartbound, c.oid), c.relpersistence + FROM pg_catalog.pg_class c, pg_catalog.pg_inherits i + WHERE c.oid = i.inhrelid AND i.inhparent = 't'::regclass + ORDER BY pg_catalog.pg_get_expr(c.relpartbound, c.oid) = 'DEFAULT', c.oid::pg_catalog.regclass::pg_catalog.text COLLATE "C"; + +-- ERROR: cannot create a permanent relation as partition of temporary relation "t" +ALTER TABLE t SPLIT PARTITION tp_0_2 INTO + (PARTITION tp_0_1 FOR VALUES FROM (0) TO (1), + PARTITION tp_1_2 FOR VALUES FROM (1) TO (2)); + +ALTER TABLE t SPLIT PARTITION tp_0_2 INTO + (PARTITION pg_temp.tp_0_1 FOR VALUES FROM (0) TO (1), + PARTITION pg_temp.tp_1_2 FOR VALUES FROM (1) TO (2)); + +-- Partitions should be temporary. +SELECT c.oid::pg_catalog.regclass, pg_catalog.pg_get_expr(c.relpartbound, c.oid), c.relpersistence + FROM pg_catalog.pg_class c, pg_catalog.pg_inherits i + WHERE c.oid = i.inhrelid AND i.inhparent = 't'::regclass + ORDER BY pg_catalog.pg_get_expr(c.relpartbound, c.oid) = 'DEFAULT', c.oid::pg_catalog.regclass::pg_catalog.text COLLATE "C"; + +DROP TABLE t; + +-- Check the new partitions inherit parent's tablespace +CREATE TABLE t (i int PRIMARY KEY USING INDEX TABLESPACE regress_tblspace) + PARTITION BY RANGE (i) TABLESPACE regress_tblspace; +CREATE TABLE tp_0_2 PARTITION OF t FOR VALUES FROM (0) TO (2); +ALTER TABLE t SPLIT PARTITION tp_0_2 INTO + (PARTITION tp_0_1 FOR VALUES FROM (0) TO (1), + PARTITION tp_1_2 FOR VALUES FROM (1) TO (2)); +SELECT tablename, tablespace FROM pg_tables + WHERE tablename IN ('t', 'tp_0_1', 'tp_1_2') AND schemaname = 'partition_split_schema' + ORDER BY tablename COLLATE "C", tablespace COLLATE "C"; +SELECT tablename, indexname, tablespace FROM pg_indexes + WHERE tablename IN ('t', 'tp_0_1', 'tp_1_2') AND schemaname = 'partition_split_schema' + ORDER BY tablename COLLATE "C", indexname COLLATE "C", tablespace COLLATE "C"; +DROP TABLE t; + +-- Check new partitions inherit parent's table access method +CREATE ACCESS METHOD partition_split_heap TYPE TABLE HANDLER heap_tableam_handler; +CREATE TABLE t (i int) PARTITION BY RANGE (i) USING partition_split_heap; +CREATE TABLE tp_0_2 PARTITION OF t FOR VALUES FROM (0) TO (2); +ALTER TABLE t SPLIT PARTITION tp_0_2 INTO + (PARTITION tp_0_1 FOR VALUES FROM (0) TO (1), + PARTITION tp_1_2 FOR VALUES FROM (1) TO (2)); +SELECT c.relname, a.amname +FROM pg_class c JOIN pg_am a ON c.relam = a.oid +WHERE c.oid IN ('t'::regclass, 'tp_0_1'::regclass, 'tp_1_2'::regclass) +ORDER BY c.relname COLLATE "C"; +DROP TABLE t; +DROP ACCESS METHOD partition_split_heap; + +-- Split a partition of a temporary table when one of the partitions after the +-- split has the same name as the partition being split +CREATE TEMP TABLE t (a int) PARTITION BY RANGE (a); +CREATE TEMP TABLE tp_0 PARTITION OF t FOR VALUES FROM (0) TO (2); +ALTER TABLE t SPLIT PARTITION tp_0 INTO + (PARTITION pg_temp.tp_0 FOR VALUES FROM (0) TO (1), + PARTITION pg_temp.tp_1 FOR VALUES FROM (1) TO (2)); +DROP TABLE t; + +-- Check defaults and constraints of new partitions +CREATE TABLE t_bigint ( + b bigint, + i int DEFAULT (3+10), + j int DEFAULT 101, + k int GENERATED ALWAYS AS (b+10) STORED +) +PARTITION BY RANGE (b); +CREATE TABLE t_bigint_default PARTITION OF t_bigint DEFAULT; +-- Show defaults/constraints before SPLIT PARTITION +-- \d+ t_bigint +-- \d+ t_bigint_default +ALTER TABLE t_bigint SPLIT 
PARTITION t_bigint_default INTO + (PARTITION t_bigint_01_10 FOR VALUES FROM (0) TO (10), + PARTITION t_bigint_default DEFAULT); +-- Show defaults/constraints after SPLIT PARTITION +-- \d+ t_bigint_default +-- \d+ t_bigint_01_10 +DROP TABLE t_bigint; + +-- Test permission checks. The user needs to own both the parent table and +-- the partition being split in order to perform the split. +CREATE ROLE regress_partition_split_alice; +CREATE ROLE regress_partition_split_bob; +GRANT ALL ON SCHEMA partition_split_schema TO regress_partition_split_alice; +GRANT ALL ON SCHEMA partition_split_schema TO regress_partition_split_bob; + +SET SESSION AUTHORIZATION regress_partition_split_alice; +CREATE TABLE t (i int) PARTITION BY RANGE (i); +CREATE TABLE tp_0_2 PARTITION OF t FOR VALUES FROM (0) TO (2); + +SET SESSION AUTHORIZATION regress_partition_split_bob; +ALTER TABLE t SPLIT PARTITION tp_0_2 INTO + (PARTITION tp_0_1 FOR VALUES FROM (0) TO (1), + PARTITION tp_1_2 FOR VALUES FROM (1) TO (2)); --error +RESET SESSION AUTHORIZATION; + +ALTER TABLE t OWNER TO regress_partition_split_bob; +SET SESSION AUTHORIZATION regress_partition_split_bob; +ALTER TABLE t SPLIT PARTITION tp_0_2 INTO + (PARTITION tp_0_1 FOR VALUES FROM (0) TO (1), + PARTITION tp_1_2 FOR VALUES FROM (1) TO (2)); --error +RESET SESSION AUTHORIZATION; + +ALTER TABLE tp_0_2 OWNER TO regress_partition_split_bob; +SET SESSION AUTHORIZATION regress_partition_split_bob; +ALTER TABLE t SPLIT PARTITION tp_0_2 INTO + (PARTITION tp_0_1 FOR VALUES FROM (0) TO (1), + PARTITION tp_1_2 FOR VALUES FROM (1) TO (2)); --ok +RESET SESSION AUTHORIZATION; + +DROP TABLE t; + +-- Test: owner of new partitions should be the same as owner of split partition +CREATE TABLE t (i int) PARTITION BY RANGE (i); + +SET SESSION AUTHORIZATION regress_partition_split_alice; +CREATE TABLE tp_0_2(i int); +RESET SESSION AUTHORIZATION; + +ALTER TABLE t ATTACH PARTITION tp_0_2 FOR VALUES FROM (0) TO (2); + +-- Owner is 'regress_partition_split_alice': +-- \dt tp_0_2 + +ALTER TABLE t SPLIT PARTITION tp_0_2 INTO + (PARTITION tp_0_1 FOR VALUES FROM (0) TO (1), + PARTITION tp_1_2 FOR VALUES FROM (1) TO (2)); + +-- Owner should be 'regress_partition_split_alice': +-- \dt tp_0_1 +-- \dt tp_1_2 + +DROP TABLE t; + +-- Test: index of new partitions should be created with the same owner as the +-- split partition +SET SESSION AUTHORIZATION regress_partition_split_alice; +CREATE TABLE t (i int) PARTITION BY RANGE (i); +CREATE TABLE tp_10_20 PARTITION OF t FOR VALUES FROM (10) TO (20); +INSERT INTO t VALUES (11), (16); +CREATE OR REPLACE FUNCTION run_me(integer) RETURNS integer AS $$ +BEGIN + RAISE NOTICE 'you are running me as %', CURRENT_USER; + RETURN $1; +END +$$ LANGUAGE PLPGSQL IMMUTABLE; + +-- Owner is 'regress_partition_split_alice': +CREATE INDEX ON t (run_me(i)); +RESET SESSION AUTHORIZATION; + +-- Owner should be 'regress_partition_split_alice': +ALTER TABLE t SPLIT PARTITION tp_10_20 INTO + (PARTITION tp_10_15 FOR VALUES FROM (10) TO (15), + PARTITION tp_15_20 FOR VALUES FROM (15) TO (20)); + +DROP TABLE t; +DROP FUNCTION run_me(integer); + +REVOKE ALL ON SCHEMA partition_split_schema FROM regress_partition_split_alice; +REVOKE ALL ON SCHEMA partition_split_schema FROM regress_partition_split_bob; +DROP ROLE regress_partition_split_alice; +DROP ROLE regress_partition_split_bob; + +-- Test for hash-partitioned table +CREATE TABLE t (i int) PARTITION BY HASH(i); +CREATE TABLE tp1 PARTITION OF t FOR VALUES WITH (MODULUS 2, REMAINDER 0); +CREATE TABLE tp2 PARTITION OF t FOR VALUES WITH (MODULUS 2, REMAINDER 
1); + +-- ERROR: partition of hash-partitioned table cannot be split +ALTER TABLE t SPLIT PARTITION tp1 INTO + (PARTITION tp1_1 FOR VALUES WITH (MODULUS 4, REMAINDER 0), + PARTITION tp1_2 FOR VALUES WITH (MODULUS 4, REMAINDER 2)); + +-- ERROR: list of new partitions should contain at least two partitions +ALTER TABLE t SPLIT PARTITION tp1 INTO + (PARTITION tp1_1 FOR VALUES WITH (MODULUS 4, REMAINDER 0)); + +DROP TABLE t; + + +-- Test for split partition properties: +-- * STATISTICS is empty +-- * COMMENT is empty +-- * DEFAULTS are the same as DEFAULTS for partitioned table +-- * STORAGE is the same as STORAGE for partitioned table +-- * GENERATED and CONSTRAINTS are the same as GENERATED and CONSTRAINTS for partitioned table +-- * TRIGGERS are the same as TRIGGERS for partitioned table + +CREATE TABLE t +(i int NOT NULL, + t text STORAGE EXTENDED COMPRESSION pglz DEFAULT 'default_t', + b bigint, + d date GENERATED ALWAYS as ('2022-01-01') STORED) PARTITION BY RANGE (abs(i)); +COMMENT ON COLUMN t.i IS 't1.i'; + +CREATE TABLE tp_x +(i int NOT NULL, + t text STORAGE MAIN DEFAULT 'default_tp_x', + b bigint, + d date GENERATED ALWAYS as ('2022-02-02') STORED); +ALTER TABLE t ATTACH PARTITION tp_x FOR VALUES FROM (0) TO (2); +COMMENT ON COLUMN tp_x.i IS 'tp_x.i'; + +CREATE STATISTICS t_stat (DEPENDENCIES) on i, b from t; +CREATE STATISTICS tp_x_stat (DEPENDENCIES) on i, b from tp_x; + +ALTER TABLE t ADD CONSTRAINT t_b_check CHECK (b > 0); +ALTER TABLE t ADD CONSTRAINT t_b_check1 CHECK (b > 0) NOT ENFORCED; +ALTER TABLE t ADD CONSTRAINT t_b_check2 CHECK (b > 0) NOT VALID; +ALTER TABLE t ADD CONSTRAINT t_b_nn NOT NULL b NOT VALID; + +INSERT INTO tp_x(i, t, b) VALUES(0, DEFAULT, 1); +INSERT INTO tp_x(i, t, b) VALUES(1, DEFAULT, 2); + +CREATE OR REPLACE FUNCTION trigger_function() RETURNS trigger LANGUAGE 'plpgsql' AS +$BODY$ +BEGIN + RAISE NOTICE 'trigger(%) called: action = %, when = %, level = %', TG_ARGV[0], TG_OP, TG_WHEN, TG_LEVEL; + RETURN new; +END; +$BODY$; + +CREATE TRIGGER t_before_insert_row_trigger BEFORE INSERT ON t FOR EACH ROW + EXECUTE PROCEDURE trigger_function('t'); +CREATE TRIGGER tp_x_before_insert_row_trigger BEFORE INSERT ON tp_x FOR EACH ROW + EXECUTE PROCEDURE trigger_function('tp_x'); + +-- \d+ tp_x +ALTER TABLE t SPLIT PARTITION tp_x INTO + (PARTITION tp_0_1 FOR VALUES FROM (0) TO (1), + PARTITION tp_x FOR VALUES FROM (1) TO (2)); +-- \d+ tp_x + +INSERT INTO t(i, t, b) VALUES(1, DEFAULT, 3); +SELECT tableoid::regclass, * FROM t ORDER BY tableoid::regclass::text COLLATE "C", b; +DROP TABLE t; +DROP FUNCTION trigger_function(); + + +-- Test for recomputation of stored generated columns. +CREATE TABLE t (i int, tab_id int generated always as (tableoid) stored) PARTITION BY RANGE (i); +CREATE TABLE tp_0_2 PARTITION OF t FOR VALUES FROM (0) TO (2); +ALTER TABLE t ADD CONSTRAINT cc CHECK(tableoid <> 123456789); +INSERT INTO t VALUES (0), (1); + +-- Should be 1 because partition identifier for row with i=0 is the same as +-- partition identifier for row with i=1. +SELECT count(*) FROM t WHERE i = 0 AND tab_id IN (SELECT tab_id FROM t WHERE i = 1); + +-- "tab_id" column (stored generated column) with "tableoid" attribute requires +-- recomputation here. +ALTER TABLE t SPLIT PARTITION tp_0_2 INTO + (PARTITION tp_0_1 FOR VALUES FROM (0) TO (1), + PARTITION tp_1_2 FOR VALUES FROM (1) TO (2)); + +-- Should be 0 because partition identifier for row with i=0 is different from +-- partition identifier for row with i=1. 
+SELECT count(*) FROM t WHERE i = 0 AND tab_id IN (SELECT tab_id FROM t WHERE i = 1); + +DROP TABLE t; + + +RESET search_path; + +-- +DROP SCHEMA partition_split_schema; +DROP SCHEMA partition_split_schema2; diff --git a/crates/squawk_parser/tests/data/regression_suite/password.sql b/crates/squawk_parser/tests/data/regression_suite/password.sql index 0a81d786..e7a98045 100644 --- a/crates/squawk_parser/tests/data/regression_suite/password.sql +++ b/crates/squawk_parser/tests/data/regression_suite/password.sql @@ -21,7 +21,7 @@ CREATE ROLE regress_passwd4 PASSWORD NULL; -- check list of created entries -- -- The scram secret will look something like: --- SCRAM-SHA-256$4096'E4HxLGtnRzsYwg'==$6YtlR4t69SguDiwFvbVgVZtuz6gpJQQqUMZ7IQJK5yI='ps75jrHeYU4lXCcXI4O8oIdJ3eO8o2jirjruw9phBTo'= +-- SCRAM-SHA-256$4096:E4HxLGtnRzsYwg==$6YtlR4t69SguDiwFvbVgVZtuz6gpJQQqUMZ7IQJK5yI=:ps75jrHeYU4lXCcXI4O8oIdJ3eO8o2jirjruw9phBTo= -- -- Since the salt is random, the exact value stored will be different on every test -- run. Use a regular expression to mask the changing parts. diff --git a/crates/squawk_parser/tests/data/regression_suite/pg_dependencies.sql b/crates/squawk_parser/tests/data/regression_suite/pg_dependencies.sql new file mode 100644 index 00000000..7935c540 --- /dev/null +++ b/crates/squawk_parser/tests/data/regression_suite/pg_dependencies.sql @@ -0,0 +1,133 @@ +-- Tests for type pg_dependencies + +-- Invalid inputs +SELECT 'null'::pg_dependencies; +SELECT '{"a": 1}'::pg_dependencies; +SELECT '[]'::pg_dependencies; +SELECT '{}'::pg_dependencies; +SELECT '[null]'::pg_dependencies; +SELECT * FROM pg_input_error_info('null', 'pg_dependencies'); +SELECT * FROM pg_input_error_info('{"a": 1}', 'pg_dependencies'); +SELECT * FROM pg_input_error_info('[]', 'pg_dependencies'); +SELECT * FROM pg_input_error_info('{}', 'pg_dependencies'); +SELECT * FROM pg_input_error_info('[null]', 'pg_dependencies'); + +-- Invalid keys +SELECT '[{"attributes_invalid" : [2,3], "dependency" : 4}]'::pg_dependencies; +SELECT '[{"attributes" : [2,3], "invalid" : 3, "dependency" : 4}]'::pg_dependencies; +SELECT * FROM pg_input_error_info('[{"attributes_invalid" : [2,3], "dependency" : 4}]', 'pg_dependencies'); +SELECT * FROM pg_input_error_info('[{"attributes" : [2,3], "invalid" : 3, "dependency" : 4}]', 'pg_dependencies'); + +-- Missing keys +SELECT '[{"attributes" : [2,3], "dependency" : 4}]'::pg_dependencies; +SELECT '[{"attributes" : [2,3], "degree" : 1.000}]'::pg_dependencies; +SELECT '[{"attributes" : [2,3], "dependency" : 4}]'::pg_dependencies; +SELECT * FROM pg_input_error_info('[{"attributes" : [2,3], "dependency" : 4}]', 'pg_dependencies'); +SELECT * FROM pg_input_error_info('[{"attributes" : [2,3], "degree" : 1.000}]', 'pg_dependencies'); +SELECT * FROM pg_input_error_info('[{"attributes" : [2,3], "dependency" : 4}]', 'pg_dependencies'); + +-- Valid keys, too many attributes +SELECT '[{"attributes" : [1,2,3,4,5,6,7,8], "dependency" : 4, "degree": 1.000}]'::pg_dependencies; +SELECT * FROM pg_input_error_info('[{"attributes" : [1,2,3,4,5,6,7,8], "dependency" : 4, "degree": 1.000}]', 'pg_dependencies'); + +-- Special characters +SELECT '[{"attributes" : ["\ud83d",3], "dependency" : 4, "degree": 0.250}]'::pg_dependencies; +SELECT '[{"attributes" : [2,3], "dependency" : "\ud83d", "degree": 0.250}]'::pg_dependencies; +SELECT '[{"attributes" : [2,3], "dependency" : 4, "degree": "\ud83d"}]'::pg_dependencies; +SELECT '[{"\ud83d" : [2,3], "dependency" : 4, "degree": 0.250}]'::pg_dependencies; +SELECT * FROM 
pg_input_error_info('[{"attributes" : ["\ud83d",3], "dependency" : 4, "degree": 0.250}]', 'pg_dependencies'); +SELECT * FROM pg_input_error_info('[{"attributes" : [2,3], "dependency" : "\ud83d", "degree": 0.250}]', 'pg_dependencies'); +SELECT * FROM pg_input_error_info('[{"attributes" : [2,3], "dependency" : 4, "degree": "\ud83d"}]', 'pg_dependencies'); +SELECT * FROM pg_input_error_info('[{"\ud83d" : [2,3], "dependency" : 4, "degree": 0.250}]', 'pg_dependencies'); + +-- Valid keys, invalid values +SELECT '[{"attributes" : null, "dependency" : 4, "degree": 1.000}]'::pg_dependencies; +SELECT '[{"attributes" : [2,null], "dependency" : 4, "degree": 1.000}]'::pg_dependencies; +SELECT '[{"attributes" : [2,3], "dependency" : null, "degree": 1.000}]'::pg_dependencies; +SELECT '[{"attributes" : [2,"a"], "dependency" : 4, "degree": 1.000}]'::pg_dependencies; +SELECT '[{"attributes" : [2,3], "dependency" : "a", "degree": 1.000}]'::pg_dependencies; +SELECT '[{"attributes" : [2,3], "dependency" : [], "degree": 1.000}]'::pg_dependencies; +SELECT '[{"attributes" : [2,3], "dependency" : [null], "degree": 1.000}]'::pg_dependencies; +SELECT '[{"attributes" : [2,3], "dependency" : [1,null], "degree": 1.000}]'::pg_dependencies; +SELECT '[{"attributes" : 1, "dependency" : 4, "degree": 1.000}]'::pg_dependencies; +SELECT '[{"attributes" : "a", "dependency" : 4, "degree": 1.000}]'::pg_dependencies; +SELECT '[{"attributes" : [2,3], "dependency" : 4, "degree": NaN}]'::pg_dependencies; +SELECT * FROM pg_input_error_info('[{"attributes" : null, "dependency" : 4, "degree": 1.000}]', 'pg_dependencies'); +SELECT * FROM pg_input_error_info('[{"attributes" : [2,null], "dependency" : 4, "degree": 1.000}]', 'pg_dependencies'); +SELECT * FROM pg_input_error_info('[{"attributes" : [2,3], "dependency" : null, "degree": 1.000}]', 'pg_dependencies'); +SELECT * FROM pg_input_error_info('[{"attributes" : [2,"a"], "dependency" : 4, "degree": 1.000}]', 'pg_dependencies'); +SELECT * FROM pg_input_error_info('[{"attributes" : [2,3], "dependency" : "a", "degree": 1.000}]', 'pg_dependencies'); +SELECT * FROM pg_input_error_info('[{"attributes" : [2,3], "dependency" : [], "degree": 1.000}]', 'pg_dependencies'); +SELECT * FROM pg_input_error_info('[{"attributes" : [2,3], "dependency" : [null], "degree": 1.000}]', 'pg_dependencies'); +SELECT * FROM pg_input_error_info('[{"attributes" : [2,3], "dependency" : [1,null], "degree": 1.000}]', 'pg_dependencies'); +SELECT * FROM pg_input_error_info('[{"attributes" : 1, "dependency" : 4, "degree": 1.000}]', 'pg_dependencies'); +SELECT * FROM pg_input_error_info('[{"attributes" : "a", "dependency" : 4, "degree": 1.000}]', 'pg_dependencies'); +SELECT * FROM pg_input_error_info('[{"attributes" : [2,3], "dependency" : 4, "degree": NaN}]', 'pg_dependencies'); + +SELECT '[{"attributes": [], "dependency": 2, "degree": 1}]' ::pg_dependencies; +SELECT '[{"attributes" : {"a": 1}, "dependency" : 4, "degree": "1.2"}]'::pg_dependencies; +SELECT * FROM pg_input_error_info('[{"attributes": [], "dependency": 2, "degree": 1}]', 'pg_dependencies'); +SELECT * FROM pg_input_error_info('[{"attributes" : {"a": 1}, "dependency" : 4, "degree": "1.2"}]', 'pg_dependencies'); + +SELECT '[{"dependency" : 4, "degree": "1.2"}]'::pg_dependencies; +SELECT '[{"attributes" : [1,2,3,4,5,6,7], "dependency" : 0, "degree": "1.2"}]'::pg_dependencies; +SELECT '[{"attributes" : [1,2,3,4,5,6,7], "dependency" : -9, "degree": "1.2"}]'::pg_dependencies; +SELECT '[{"attributes": [1,2], "dependency": 2, "degree": 1}]' ::pg_dependencies; 
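For contrast with the invalid inputs above, a minimal well-formed value — a sketch based on the "Valid inputs" cases later in this file (the single-item form is an assumption on our part; the tests themselves only exercise the multi-item array):

-- Hypothetical minimal valid input: a single item with all three required
-- keys ("attributes", "dependency", "degree") present and well-typed.
SELECT '[{"attributes" : [2,3], "dependency" : 4, "degree": 0.250}]'::pg_dependencies;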
+SELECT '[{"attributes" : [1, {}], "dependency" : 1, "degree": "1.2"}]'::pg_dependencies; +SELECT '[{"attributes" : [1,2], "dependency" : {}, "degree": 1.0}]'::pg_dependencies; +SELECT '[{"attributes" : [1,2], "dependency" : 3, "degree": {}}]'::pg_dependencies; +SELECT '[{"attributes" : [1,2], "dependency" : 1, "degree": "a"}]'::pg_dependencies; +SELECT * FROM pg_input_error_info('[{"dependency" : 4, "degree": "1.2"}]', 'pg_dependencies'); +SELECT * FROM pg_input_error_info('[{"attributes" : [1,2,3,4,5,6,7], "dependency" : 0, "degree": "1.2"}]', 'pg_dependencies'); +SELECT * FROM pg_input_error_info('[{"attributes" : [1,2,3,4,5,6,7], "dependency" : -9, "degree": "1.2"}]', 'pg_dependencies'); +SELECT * FROM pg_input_error_info('[{"attributes": [1,2], "dependency": 2, "degree": 1}]' , 'pg_dependencies'); +SELECT * FROM pg_input_error_info('[{"attributes" : [1, {}], "dependency" : 1, "degree": "1.2"}]', 'pg_dependencies'); +SELECT * FROM pg_input_error_info('[{"attributes" : [1,2], "dependency" : {}, "degree": 1.0}]', 'pg_dependencies'); +SELECT * FROM pg_input_error_info('[{"attributes" : [1,2], "dependency" : 3, "degree": {}}]', 'pg_dependencies'); +SELECT * FROM pg_input_error_info('[{"attributes" : [1,2], "dependency" : 1, "degree": "a"}]', 'pg_dependencies'); + +-- Funky degree values, which do not fail. +SELECT '[{"attributes" : [2], "dependency" : 4, "degree": "NaN"}]'::pg_dependencies; +SELECT '[{"attributes" : [2], "dependency" : 4, "degree": "-inf"}]'::pg_dependencies; +SELECT '[{"attributes" : [2], "dependency" : 4, "degree": "inf"}]'::pg_dependencies; +SELECT '[{"attributes" : [2], "dependency" : 4, "degree": "-inf"}]'::pg_dependencies::text::pg_dependencies; + +-- Duplicated keys +SELECT '[{"attributes" : [2,3], "attributes": [1,2], "dependency" : 4, "degree": 1.000}]'::pg_dependencies; +SELECT '[{"attributes" : [2,3], "dependency" : 4, "dependency": 4, "degree": 1.000}]'::pg_dependencies; +SELECT '[{"attributes" : [2,3], "dependency": 4, "degree": 1.000, "degree": 1.000}]'::pg_dependencies; +SELECT * FROM pg_input_error_info('[{"attributes" : [2,3], "attributes": [1,2], "dependency" : 4, "degree": 1.000}]', 'pg_dependencies'); +SELECT * FROM pg_input_error_info('[{"attributes" : [2,3], "dependency" : 4, "dependency": 4, "degree": 1.000}]', 'pg_dependencies'); +SELECT * FROM pg_input_error_info('[{"attributes" : [2,3], "dependency": 4, "degree": 1.000, "degree": 1.000}]', 'pg_dependencies'); + +-- Invalid attnums +SELECT '[{"attributes" : [0,2], "dependency" : 4, "degree": 0.500}]'::pg_dependencies; +SELECT '[{"attributes" : [-7,-9], "dependency" : 4, "degree": 0.500}]'::pg_dependencies; +SELECT * FROM pg_input_error_info('[{"attributes" : [0,2], "dependency" : 4, "degree": 0.500}]', 'pg_dependencies'); +SELECT * FROM pg_input_error_info('[{"attributes" : [-7,-9], "dependency" : 4, "degree": 0.500}]', 'pg_dependencies'); + +-- Duplicated attributes +SELECT '[{"attributes" : [2,2], "dependency" : 4, "degree": 0.500}]'::pg_dependencies; +SELECT * FROM pg_input_error_info('[{"attributes" : [2,2], "dependency" : 4, "degree": 0.500}]', 'pg_dependencies'); + +-- Duplicated attribute lists. 
+SELECT '[{"attributes" : [2,3], "dependency" : 4, "degree": 1.000}, + {"attributes" : [2,3], "dependency" : 4, "degree": 1.000}]'::pg_dependencies; +SELECT * FROM pg_input_error_info('[{"attributes" : [2,3], "dependency" : 4, "degree": 1.000}, + {"attributes" : [2,3], "dependency" : 4, "degree": 1.000}]', 'pg_dependencies'); + +-- Valid inputs +SELECT '[{"attributes" : [2,3], "dependency" : 4, "degree": 0.250}, + {"attributes" : [2,-1], "dependency" : 4, "degree": 0.500}, + {"attributes" : [2,3,-1], "dependency" : 4, "degree": 0.750}, + {"attributes" : [2,3,-1,-2], "dependency" : 4, "degree": 1.000}]'::pg_dependencies; +SELECT * FROM pg_input_error_info('[{"attributes" : [2,3], "dependency" : 4, "degree": 0.250}, + {"attributes" : [2,-1], "dependency" : 4, "degree": 0.500}, + {"attributes" : [2,3,-1], "dependency" : 4, "degree": 0.750}, + {"attributes" : [2,3,-1,-2], "dependency" : 4, "degree": 1.000}]', 'pg_dependencies'); +-- Partially-covered attribute lists, possible as items with a degree of 0 +-- are discarded. +SELECT '[{"attributes" : [2,3], "dependency" : 4, "degree": 1.000}, + {"attributes" : [1,-1], "dependency" : 4, "degree": 1.000}, + {"attributes" : [2,3,-1], "dependency" : 4, "degree": 1.000}, + {"attributes" : [2,3,-1,-2], "dependency" : 4, "degree": 1.000}]'::pg_dependencies; diff --git a/crates/squawk_parser/tests/data/regression_suite/pg_ndistinct.sql b/crates/squawk_parser/tests/data/regression_suite/pg_ndistinct.sql new file mode 100644 index 00000000..319c7d48 --- /dev/null +++ b/crates/squawk_parser/tests/data/regression_suite/pg_ndistinct.sql @@ -0,0 +1,106 @@ +-- Tests for type pg_ndistinct + +-- Invalid inputs +SELECT 'null'::pg_ndistinct; +SELECT '{"a": 1}'::pg_ndistinct; +SELECT '[]'::pg_ndistinct; +SELECT '{}'::pg_ndistinct; +SELECT '[null]'::pg_ndistinct; +SELECT * FROM pg_input_error_info('null', 'pg_ndistinct'); +SELECT * FROM pg_input_error_info('{"a": 1}', 'pg_ndistinct'); +SELECT * FROM pg_input_error_info('[]', 'pg_ndistinct'); +SELECT * FROM pg_input_error_info('{}', 'pg_ndistinct'); +SELECT * FROM pg_input_error_info('[null]', 'pg_ndistinct'); +-- Invalid keys +SELECT '[{"attributes_invalid" : [2,3], "ndistinct" : 4}]'::pg_ndistinct; +SELECT '[{"attributes" : [2,3], "invalid" : 3, "ndistinct" : 4}]'::pg_ndistinct; +SELECT '[{"attributes" : [2,3], "attributes" : [1,3], "ndistinct" : 4}]'::pg_ndistinct; +SELECT '[{"attributes" : [2,3], "ndistinct" : 4, "ndistinct" : 4}]'::pg_ndistinct; +SELECT * FROM pg_input_error_info('[{"attributes_invalid" : [2,3], "ndistinct" : 4}]', 'pg_ndistinct'); +SELECT * FROM pg_input_error_info('[{"attributes" : [2,3], "invalid" : 3, "ndistinct" : 4}]', 'pg_ndistinct'); +SELECT * FROM pg_input_error_info('[{"attributes" : [2,3], "attributes" : [1,3], "ndistinct" : 4}]', 'pg_ndistinct'); +SELECT * FROM pg_input_error_info('[{"attributes" : [2,3], "ndistinct" : 4, "ndistinct" : 4}]', 'pg_ndistinct'); + +-- Missing key +SELECT '[{"attributes" : [2,3]}]'::pg_ndistinct; +SELECT '[{"ndistinct" : 4}]'::pg_ndistinct; +SELECT * FROM pg_input_error_info('[{"attributes" : [2,3]}]', 'pg_ndistinct'); +SELECT * FROM pg_input_error_info('[{"ndistinct" : 4}]', 'pg_ndistinct'); + +-- Valid keys, too many attributes +SELECT '[{"attributes" : [1,2,3,4,5,6,7,8,9], "ndistinct" : 4}]'::pg_ndistinct; +SELECT * FROM pg_input_error_info('[{"attributes" : [1,2,3,4,5,6,7,8,9], "ndistinct" : 4}]', 'pg_ndistinct'); + +-- Special characters +SELECT '[{"\ud83d" : [1, 2], "ndistinct" : 4}]'::pg_ndistinct; +SELECT '[{"attributes" : [1, 2], "\ud83d" : 
4}]'::pg_ndistinct; +SELECT '[{"attributes" : [1, 2], "ndistinct" : "\ud83d"}]'::pg_ndistinct; +SELECT '[{"attributes" : ["\ud83d", 2], "ndistinct" : 1}]'::pg_ndistinct; +SELECT * FROM pg_input_error_info('[{"\ud83d" : [1, 2], "ndistinct" : 4}]', 'pg_ndistinct'); +SELECT * FROM pg_input_error_info('[{"attributes" : [1, 2], "\ud83d" : 4}]', 'pg_ndistinct'); +SELECT * FROM pg_input_error_info('[{"attributes" : [1, 2], "ndistinct" : "\ud83d"}]', 'pg_ndistinct'); +SELECT * FROM pg_input_error_info('[{"attributes" : ["\ud83d", 2], "ndistinct" : 1}]', 'pg_ndistinct'); + +-- Valid keys, invalid values +SELECT '[{"attributes" : null, "ndistinct" : 4}]'::pg_ndistinct; +SELECT '[{"attributes" : [], "ndistinct" : 1}]'::pg_ndistinct; +SELECT '[{"attributes" : [2], "ndistinct" : 4}]'::pg_ndistinct; +SELECT '[{"attributes" : [2,null], "ndistinct" : 4}]'::pg_ndistinct; +SELECT '[{"attributes" : [2,3], "ndistinct" : null}]'::pg_ndistinct; +SELECT '[{"attributes" : [2,"a"], "ndistinct" : 4}]'::pg_ndistinct; +SELECT '[{"attributes" : [2,3], "ndistinct" : "a"}]'::pg_ndistinct; +SELECT '[{"attributes" : [2,3], "ndistinct" : []}]'::pg_ndistinct; +SELECT '[{"attributes" : [2,3], "ndistinct" : [null]}]'::pg_ndistinct; +SELECT '[{"attributes" : [2,3], "ndistinct" : [1,null]}]'::pg_ndistinct; +SELECT '[{"attributes" : [2,3], "ndistinct" : {"a": 1}}]'::pg_ndistinct; +SELECT '[{"attributes" : [0,1], "ndistinct" : 1}]'::pg_ndistinct; +SELECT '[{"attributes" : [-7,-9], "ndistinct" : 1}]'::pg_ndistinct; +SELECT '[{"attributes" : 1, "ndistinct" : 4}]'::pg_ndistinct; +SELECT '[{"attributes" : "a", "ndistinct" : 4}]'::pg_ndistinct; +SELECT '[{"attributes" : {"a": 1}, "ndistinct" : 1}]'::pg_ndistinct; +SELECT '[{"attributes" : [1, {"a": 1}], "ndistinct" : 1}]'::pg_ndistinct; +SELECT * FROM pg_input_error_info('[{"attributes" : null, "ndistinct" : 4}]', 'pg_ndistinct'); +SELECT * FROM pg_input_error_info('[{"attributes" : [], "ndistinct" : 1}]', 'pg_ndistinct'); +SELECT * FROM pg_input_error_info('[{"attributes" : [2], "ndistinct" : 4}]', 'pg_ndistinct'); +SELECT * FROM pg_input_error_info('[{"attributes" : [2,null], "ndistinct" : 4}]', 'pg_ndistinct'); +SELECT * FROM pg_input_error_info('[{"attributes" : [2,3], "ndistinct" : null}]', 'pg_ndistinct'); +SELECT * FROM pg_input_error_info('[{"attributes" : [2,"a"], "ndistinct" : 4}]', 'pg_ndistinct'); +SELECT * FROM pg_input_error_info('[{"attributes" : [2,3], "ndistinct" : "a"}]', 'pg_ndistinct'); +SELECT * FROM pg_input_error_info('[{"attributes" : [2,3], "ndistinct" : []}]', 'pg_ndistinct'); +SELECT * FROM pg_input_error_info('[{"attributes" : [2,3], "ndistinct" : [null]}]', 'pg_ndistinct'); +SELECT * FROM pg_input_error_info('[{"attributes" : [2,3], "ndistinct" : [1,null]}]', 'pg_ndistinct'); +SELECT * FROM pg_input_error_info('[{"attributes" : [2,3], "ndistinct" : {"a": 1}}]', 'pg_ndistinct'); +SELECT * FROM pg_input_error_info('[{"attributes" : 1, "ndistinct" : 4}]', 'pg_ndistinct'); +SELECT * FROM pg_input_error_info('[{"attributes" : [-7,-9], "ndistinct" : 1}]', 'pg_ndistinct'); +SELECT * FROM pg_input_error_info('[{"attributes" : 1, "ndistinct" : 4}]', 'pg_ndistinct'); +SELECT * FROM pg_input_error_info('[{"attributes" : "a", "ndistinct" : 4}]', 'pg_ndistinct'); +SELECT * FROM pg_input_error_info('[{"attributes" : {"a": 1}, "ndistinct" : 1}]', 'pg_ndistinct'); +SELECT * FROM pg_input_error_info('[{"attributes" : [1, {"a": 1}], "ndistinct" : 1}]', 'pg_ndistinct'); +-- Duplicated attributes +SELECT '[{"attributes" : [2,2], "ndistinct" : 4}]'::pg_ndistinct; +SELECT * 
FROM pg_input_error_info('[{"attributes" : [2,2], "ndistinct" : 4}]', 'pg_ndistinct'); +-- Duplicated attribute lists. +SELECT '[{"attributes" : [2,3], "ndistinct" : 4}, + {"attributes" : [2,3], "ndistinct" : 4}]'::pg_ndistinct; +SELECT * FROM pg_input_error_info('[{"attributes" : [2,3], "ndistinct" : 4}, + {"attributes" : [2,3], "ndistinct" : 4}]', 'pg_ndistinct'); +-- Partially-covered attribute lists. +SELECT '[{"attributes" : [2,3], "ndistinct" : 4}, + {"attributes" : [2,-1], "ndistinct" : 4}, + {"attributes" : [2,3,-1], "ndistinct" : 4}, + {"attributes" : [1,3,-1,-2], "ndistinct" : 4}]'::pg_ndistinct; +SELECT * FROM pg_input_error_info('[{"attributes" : [2,3], "ndistinct" : 4}, + {"attributes" : [2,-1], "ndistinct" : 4}, + {"attributes" : [2,3,-1], "ndistinct" : 4}, + {"attributes" : [1,3,-1,-2], "ndistinct" : 4}]', 'pg_ndistinct'); + +-- Valid inputs +-- Two attributes. +SELECT '[{"attributes" : [1,2], "ndistinct" : 4}]'::pg_ndistinct; +-- Three attributes. +SELECT '[{"attributes" : [2,-1], "ndistinct" : 1}, + {"attributes" : [3,-1], "ndistinct" : 2}, + {"attributes" : [2,3,-1], "ndistinct" : 3}]'::pg_ndistinct; +-- Three attributes with only two items. +SELECT '[{"attributes" : [2,-1], "ndistinct" : 1}, + {"attributes" : [2,3,-1], "ndistinct" : 3}]'::pg_ndistinct; diff --git a/crates/squawk_parser/tests/data/regression_suite/plpgsql.sql b/crates/squawk_parser/tests/data/regression_suite/plpgsql.sql index 85fbee49..0fae1b82 100644 --- a/crates/squawk_parser/tests/data/regression_suite/plpgsql.sql +++ b/crates/squawk_parser/tests/data/regression_suite/plpgsql.sql @@ -1863,6 +1863,7 @@ copy rc_test from stdin; -- 5 10 -- 50 100 -- 500 1000 +-- \. create function return_unnamed_refcursor() returns refcursor as $$ declare diff --git a/crates/squawk_parser/tests/data/regression_suite/polymorphism.sql b/crates/squawk_parser/tests/data/regression_suite/polymorphism.sql index beddca83..69a2262e 100644 --- a/crates/squawk_parser/tests/data/regression_suite/polymorphism.sql +++ b/crates/squawk_parser/tests/data/regression_suite/polymorphism.sql @@ -739,6 +739,7 @@ $$ language sql; select dfunc(); -- verify it lists properly +-- \df dfunc drop function dfunc(int, int); @@ -818,6 +819,7 @@ select dfunc(10,20); create or replace function dfunc(a variadic int[]) returns int as $$ select array_upper($1, 1) $$ language sql; +-- \df dfunc drop function dfunc(a variadic int[]); @@ -871,6 +873,26 @@ select * from dfunc(1,c := 2,d := 3); -- fail, no value for b drop function dfunc(int, int, int, int); +create function xleast(x numeric, variadic arr numeric[]) + returns numeric as $$ + select least(x, min(arr[i])) from generate_subscripts(arr, 1) g(i); +$$ language sql; + +select xleast(x => 1, variadic arr => array[2,3]); +select xleast(1, variadic arr => array[2,3]); + +set search_path = pg_catalog; +select xleast(1, variadic arr => array[2,3]); -- wrong schema +reset search_path; +select xleast(foo => 1, variadic arr => array[2,3]); -- wrong argument name +select xleast(x => 1, variadic array[2,3]); -- misuse of mixed notation +select xleast(1, variadic x => array[2,3]); -- misuse of mixed notation +select xleast(arr => array[1], variadic x => 3); -- wrong arg is VARIADIC +select xleast(arr => array[1], x => 3); -- failed to use VARIADIC +select xleast(arr => 1, variadic x => array[2,3]); -- mixed-up args + +drop function xleast(x numeric, variadic arr numeric[]); + -- test with different parameter types create function dfunc(a varchar, b numeric, c date = current_date) returns table (a varchar, b 
numeric, c date) as $$ @@ -991,6 +1013,7 @@ CREATE VIEW dfview AS select * from dfview; +-- \d+ dfview drop view dfview; drop function dfunc(anyelement, anyelement, bool); diff --git a/crates/squawk_parser/tests/data/regression_suite/predicate.sql b/crates/squawk_parser/tests/data/regression_suite/predicate.sql index 9dcb81b1..7d4fda1b 100644 --- a/crates/squawk_parser/tests/data/regression_suite/predicate.sql +++ b/crates/squawk_parser/tests/data/regression_suite/predicate.sql @@ -115,6 +115,24 @@ SELECT * FROM pred_tab t1 LEFT JOIN pred_tab t2 ON t1.a = 1 LEFT JOIN pred_tab t3 ON t2.a IS NULL OR t2.c IS NULL; +-- +-- Tests for NullTest reduction in EXISTS sublink +-- + +-- Ensure the IS_NOT_NULL qual is ignored +EXPLAIN (COSTS OFF) +SELECT * FROM pred_tab t1 + LEFT JOIN pred_tab t2 ON EXISTS + (SELECT 1 FROM pred_tab t3, pred_tab t4, pred_tab t5, pred_tab t6 + WHERE t1.a = t3.a AND t6.a IS NOT NULL); + +-- Ensure the IS_NULL qual is reduced to constant-FALSE +EXPLAIN (COSTS OFF) +SELECT * FROM pred_tab t1 + LEFT JOIN pred_tab t2 ON EXISTS + (SELECT 1 FROM pred_tab t3, pred_tab t4, pred_tab t5, pred_tab t6 + WHERE t1.a = t3.a AND t6.a IS NULL); + DROP TABLE pred_tab; -- Validate we handle IS NULL and IS NOT NULL quals correctly with inheritance @@ -183,3 +201,42 @@ SELECT * FROM pred_tab t1 DROP TABLE pred_tab; DROP TABLE pred_tab_notnull; + +-- Validate that NullTest quals in constraint expressions are reduced correctly +CREATE TABLE pred_tab1 (a int NOT NULL, b int, + CONSTRAINT check_tab1 CHECK (a IS NULL OR b > 2)); +CREATE TABLE pred_tab2 (a int, b int, + CONSTRAINT check_a CHECK (a IS NOT NULL)); + +SET constraint_exclusion TO ON; + +-- Ensure that we get a dummy plan +EXPLAIN (COSTS OFF) +SELECT * FROM pred_tab1, pred_tab2 WHERE pred_tab2.a IS NULL; + +-- Ensure that we get a dummy plan +EXPLAIN (COSTS OFF) +SELECT * FROM pred_tab2, pred_tab1 WHERE pred_tab1.a IS NULL OR pred_tab1.b < 2; + +RESET constraint_exclusion; +DROP TABLE pred_tab1; +DROP TABLE pred_tab2; + +-- Validate that NullTest quals in index expressions and predicate are reduced correctly +CREATE TABLE pred_tab (a int, b int NOT NULL, c int NOT NULL); +INSERT INTO pred_tab SELECT i, i, i FROM generate_series(1, 1000) i; +CREATE INDEX pred_tab_exprs_idx ON pred_tab ((a < 5 AND b IS NOT NULL AND c IS NOT NULL)); +CREATE INDEX pred_tab_pred_idx ON pred_tab (a) WHERE b IS NOT NULL AND c IS NOT NULL; +ANALYZE pred_tab; + +-- Ensure that index pred_tab_exprs_idx is used +EXPLAIN (COSTS OFF) +SELECT * FROM pred_tab WHERE (a < 5 AND b IS NOT NULL AND c IS NOT NULL) IS TRUE; +SELECT * FROM pred_tab WHERE (a < 5 AND b IS NOT NULL AND c IS NOT NULL) IS TRUE; + +-- Ensure that index pred_tab_pred_idx is used +EXPLAIN (COSTS OFF) +SELECT * FROM pred_tab WHERE a < 3 AND b IS NOT NULL AND c IS NOT NULL; +SELECT * FROM pred_tab WHERE a < 3 AND b IS NOT NULL AND c IS NOT NULL; + +DROP TABLE pred_tab; diff --git a/crates/squawk_parser/tests/data/regression_suite/prepared_xacts.sql b/crates/squawk_parser/tests/data/regression_suite/prepared_xacts.sql index cfb47e23..8e681cca 100644 --- a/crates/squawk_parser/tests/data/regression_suite/prepared_xacts.sql +++ b/crates/squawk_parser/tests/data/regression_suite/prepared_xacts.sql @@ -133,6 +133,7 @@ lock table pxtest3 in access share mode nowait; rollback; -- Disconnect, we will continue testing in a different backend +-- \c - -- There should still be two prepared transactions SELECT gid FROM pg_prepared_xacts WHERE gid ~ '^regress_' ORDER BY gid; @@ -144,6 +145,7 @@ rollback; -- Commit 
table creation COMMIT PREPARED 'regress_sub1'; +-- \d pxtest2 SELECT * FROM pxtest2; -- There should be one prepared transaction diff --git a/crates/squawk_parser/tests/data/regression_suite/privileges.sql b/crates/squawk_parser/tests/data/regression_suite/privileges.sql index 19bff6b6..30633a24 100644 --- a/crates/squawk_parser/tests/data/regression_suite/privileges.sql +++ b/crates/squawk_parser/tests/data/regression_suite/privileges.sql @@ -90,21 +90,6 @@ CREATE USER regress_priv_user3; CREATE USER regress_priv_user4; CREATE USER regress_priv_user5; --- DROP OWNED should also act on granted and granted-to roles -GRANT regress_priv_user1 TO regress_priv_user2; -GRANT regress_priv_user2 TO regress_priv_user3; -SELECT roleid::regrole, member::regrole FROM pg_auth_members - WHERE roleid IN ('regress_priv_user1'::regrole,'regress_priv_user2'::regrole) - ORDER BY roleid::regrole::text; -REASSIGN OWNED BY regress_priv_user2 TO regress_priv_user4; -- no effect -SELECT roleid::regrole, member::regrole FROM pg_auth_members - WHERE roleid IN ('regress_priv_user1'::regrole,'regress_priv_user2'::regrole) - ORDER BY roleid::regrole::text; -DROP OWNED BY regress_priv_user2; -- removes both grants -SELECT roleid::regrole, member::regrole FROM pg_auth_members - WHERE roleid IN ('regress_priv_user1'::regrole,'regress_priv_user2'::regrole) - ORDER BY roleid::regrole::text; - GRANT pg_read_all_data TO regress_priv_user6; GRANT pg_write_all_data TO regress_priv_user7; GRANT pg_read_all_settings TO regress_priv_user8 WITH ADMIN OPTION; @@ -267,7 +252,10 @@ TRUNCATE atest2; -- fail BEGIN; LOCK atest2 IN ACCESS EXCLUSIVE MODE; -- fail COMMIT; --- checks in subquery, both ok +COPY atest2 FROM stdin; -- fail +-- GRANT ALL ON atest1 TO PUBLIC; -- fail +-- +-- -- checks in subquery, both ok SELECT * FROM atest1 WHERE ( b IN ( SELECT col1 FROM atest2 ) ); SELECT * FROM atest2 WHERE ( col1 IN ( SELECT b FROM atest1 ) ); @@ -307,11 +295,16 @@ TRUNCATE atest2; -- fail BEGIN; LOCK atest2 IN ACCESS EXCLUSIVE MODE; -- ok COMMIT; --- checks in subquery, both fail +COPY atest2 FROM stdin; -- fail +-- +-- -- checks in subquery, both fail SELECT * FROM atest1 WHERE ( b IN ( SELECT col1 FROM atest2 ) ); SELECT * FROM atest2 WHERE ( col1 IN ( SELECT b FROM atest1 ) ); SET SESSION AUTHORIZATION regress_priv_user4; +COPY atest2 FROM stdin; -- ok +-- bar true +-- \. SELECT * FROM atest1; -- ok @@ -338,8 +331,6 @@ CREATE VIEW atest12v AS SELECT * FROM atest12 WHERE b <<< 5; CREATE VIEW atest12sbv WITH (security_barrier=true) AS SELECT * FROM atest12 WHERE b <<< 5; -GRANT SELECT ON atest12v TO PUBLIC; -GRANT SELECT ON atest12sbv TO PUBLIC; -- This plan should use nestloop, knowing that few rows will be selected. EXPLAIN (COSTS OFF) SELECT * FROM atest12v x, atest12v y WHERE x.a = y.b; @@ -361,8 +352,16 @@ CREATE FUNCTION leak2(integer,integer) RETURNS boolean CREATE OPERATOR >>> (procedure = leak2, leftarg = integer, rightarg = integer, restrict = scalargtsel); --- This should not show any "leak" notices before failing. +-- These should not show any "leak" notices before failing. EXPLAIN (COSTS OFF) SELECT * FROM atest12 WHERE a >>> 0; +EXPLAIN (COSTS OFF) SELECT * FROM atest12v WHERE a >>> 0; +EXPLAIN (COSTS OFF) SELECT * FROM atest12sbv WHERE a >>> 0; + +-- Now regress_priv_user1 grants access to regress_priv_user2 via the views. 
+SET SESSION AUTHORIZATION regress_priv_user1; +GRANT SELECT ON atest12v TO PUBLIC; +GRANT SELECT ON atest12sbv TO PUBLIC; +SET SESSION AUTHORIZATION regress_priv_user2; -- These plans should continue to use a nestloop, since they execute with the -- privileges of the view owner. @@ -543,6 +542,10 @@ SELECT one, two FROM atest5 NATURAL JOIN atest6; -- ok now -- test column-level privileges for INSERT and UPDATE INSERT INTO atest5 (two) VALUES (3); -- ok +COPY atest5 FROM stdin; -- fail +COPY atest5 (two) FROM stdin; -- ok +-- 1 +-- \. INSERT INTO atest5 (three) VALUES (4); -- fail INSERT INTO atest5 VALUES (5,5,5); -- fail UPDATE atest5 SET three = 10; -- ok @@ -840,6 +843,7 @@ END; -- privileges on functions, languages -- switch to superuser +-- \c - REVOKE ALL PRIVILEGES ON LANGUAGE sql FROM PUBLIC; GRANT USAGE ON LANGUAGE sql TO regress_priv_user1; -- ok @@ -892,6 +896,7 @@ DROP FUNCTION priv_testfunc1(int); -- fail DROP AGGREGATE priv_testagg1(int); -- fail DROP PROCEDURE priv_testproc1(int); -- fail +-- \c - DROP FUNCTION priv_testfunc1(int); -- ok -- restore to sanity @@ -909,6 +914,7 @@ ROLLBACK; -- privileges on types -- switch to superuser +-- \c - CREATE TYPE priv_testtype1 AS (a int, b text); REVOKE USAGE ON TYPE priv_testtype1 FROM PUBLIC; @@ -993,6 +999,7 @@ CREATE TABLE test11b AS (SELECT 1::priv_testdomain1 AS a); REVOKE ALL ON TYPE priv_testtype1 FROM PUBLIC; +-- \c - DROP AGGREGATE priv_testagg1b(priv_testdomain1); DROP DOMAIN priv_testdomain2b; DROP OPERATOR !! (NONE, priv_testdomain1); @@ -1029,6 +1036,7 @@ select has_table_privilege(-999999,'pg_authid','update'); select has_table_privilege(1,'select'); -- superuser +-- \c - select has_table_privilege(current_user,'pg_authid','select'); select has_table_privilege(current_user,'pg_authid','insert'); @@ -1163,6 +1171,7 @@ SELECT has_table_privilege('regress_priv_user1', 'atest4', 'SELECT WITH GRANT OP -- security-restricted operations +-- \c - CREATE ROLE regress_sro_user; -- Check that index expressions and predicates are run as the table's owner @@ -1220,6 +1229,7 @@ CREATE FUNCTION mv_action() RETURNS bool LANGUAGE sql AS -- REFRESH of this MV will queue a GRANT at end of transaction CREATE MATERIALIZED VIEW sro_mv AS SELECT mv_action() WITH NO DATA; REFRESH MATERIALIZED VIEW sro_mv; +-- \c - REFRESH MATERIALIZED VIEW sro_mv; SET SESSION AUTHORIZATION regress_sro_user; @@ -1233,6 +1243,7 @@ CREATE CONSTRAINT TRIGGER t AFTER INSERT ON sro_trojan_table CREATE OR REPLACE FUNCTION mv_action() RETURNS bool LANGUAGE sql AS 'INSERT INTO public.sro_trojan_table DEFAULT VALUES; SELECT true'; REFRESH MATERIALIZED VIEW sro_mv; +-- \c - REFRESH MATERIALIZED VIEW sro_mv; BEGIN; SET CONSTRAINTS ALL IMMEDIATE; REFRESH MATERIALIZED VIEW sro_mv; COMMIT; @@ -1249,6 +1260,7 @@ EXCEPTION WHEN OTHERS THEN END$$; CREATE MATERIALIZED VIEW sro_index_mv AS SELECT 1 AS c; CREATE UNIQUE INDEX ON sro_index_mv (c) WHERE unwanted_grant_nofail(1) > 0; +-- \c - REFRESH MATERIALIZED VIEW CONCURRENTLY sro_index_mv; REFRESH MATERIALIZED VIEW sro_index_mv; @@ -1280,6 +1292,7 @@ REVOKE regress_priv_group2 FROM regress_priv_user5; -- has_sequence_privilege tests +-- \c - CREATE SEQUENCE x_seq; @@ -1294,6 +1307,7 @@ SET SESSION AUTHORIZATION regress_priv_user2; SELECT has_sequence_privilege('x_seq', 'USAGE'); -- largeobject privilege tests +-- \c - SET SESSION AUTHORIZATION regress_priv_user1; SELECT lo_create(1001); @@ -1312,6 +1326,7 @@ GRANT SELECT, INSERT ON LARGE OBJECT 1001 TO PUBLIC; -- to be failed GRANT SELECT, UPDATE ON LARGE OBJECT 1001 TO 
nosuchuser; -- to be failed GRANT SELECT, UPDATE ON LARGE OBJECT 999 TO PUBLIC; -- to be failed +-- \c - SET SESSION AUTHORIZATION regress_priv_user2; SELECT lo_create(2001); @@ -1338,6 +1353,7 @@ GRANT ALL ON LARGE OBJECT 2001 TO regress_priv_user3; SELECT lo_unlink(1001); -- to be denied SELECT lo_unlink(2002); +-- \c - -- confirm ACL setting SELECT oid, pg_get_userbyid(lomowner) ownername, lomacl FROM pg_largeobject_metadata WHERE oid >= 1000 AND oid < 3000 ORDER BY oid; @@ -1353,6 +1369,7 @@ SELECT lo_truncate(lo_open(2001, x'20000'::int), 10); -- has_largeobject_privilege function -- superuser +-- \c - SELECT has_largeobject_privilege(1001, 'SELECT'); SELECT has_largeobject_privilege(1002, 'SELECT'); SELECT has_largeobject_privilege(1003, 'SELECT'); @@ -1386,6 +1403,7 @@ SELECT has_largeobject_privilege('regress_priv_user3', 1005, 'UPDATE'); -- false SELECT has_largeobject_privilege('regress_priv_user3', 2001, 'UPDATE'); -- compatibility mode in largeobject permission +-- \c - SET lo_compat_privileges = false; -- default setting SET SESSION AUTHORIZATION regress_priv_user4; @@ -1401,6 +1419,7 @@ SELECT lo_export(1001, '/dev/null'); -- to be denied SELECT lo_import('/dev/null'); -- to be denied SELECT lo_import('/dev/null', 2003); -- to be denied +-- \c - SET lo_compat_privileges = true; -- compatibility mode SET SESSION AUTHORIZATION regress_priv_user4; @@ -1414,6 +1433,7 @@ SELECT lo_unlink(1002); SELECT lo_export(1001, '/dev/null'); -- to be denied -- don't allow unpriv users to access pg_largeobject contents +-- \c - SELECT * FROM pg_largeobject LIMIT 0; SET SESSION AUTHORIZATION regress_priv_user1; @@ -1466,6 +1486,7 @@ INSERT INTO datdba_only DEFAULT VALUES; ROLLBACK; -- test default ACLs +-- \c - CREATE SCHEMA testns; GRANT ALL ON SCHEMA testns TO regress_priv_user1; @@ -1514,6 +1535,14 @@ SELECT makeaclitem('regress_priv_user1'::regrole, 'regress_priv_user2'::regrole, SELECT makeaclitem('regress_priv_user1'::regrole, 'regress_priv_user2'::regrole, 'SELECT, fake_privilege', FALSE); -- error +-- Test quoting and dequoting of user names in ACLs +CREATE ROLE "regress_""quoted"; +SELECT makeaclitem('regress_"quoted'::regrole, 'regress_"quoted'::regrole, + 'SELECT', TRUE); +SELECT '"regress_""quoted"=r*/"regress_""quoted"'::aclitem; +SELECT '""=r*/""'::aclitem; -- used to be misparsed as """" +DROP ROLE "regress_""quoted"; + -- Test non-throwing aclitem I/O SELECT pg_input_is_valid('regress_priv_user1=r/regress_priv_user2', 'aclitem'); SELECT pg_input_is_valid('regress_priv_user1=r/', 'aclitem'); @@ -1589,6 +1618,7 @@ ROLLBACK; ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT ALL ON LARGE OBJECTS TO public; -- error +-- \c - -- Test for DROP OWNED BY with shared dependencies. 
This is done in a -- separate, rolled-back transaction to avoid any trouble with other @@ -1676,6 +1706,7 @@ SELECT d.* -- check that entries went away -- Grant on all objects of given type in a schema +-- \c - CREATE SCHEMA testns; CREATE TABLE testns.t1 (f1 int); @@ -1721,6 +1752,7 @@ DROP SCHEMA testns CASCADE; -- Change owner of the schema and rename the new schema owner +-- \c - CREATE ROLE regress_schemauser1 superuser login; CREATE ROLE regress_schemauser2 superuser login; @@ -1738,12 +1770,14 @@ set session role regress_schemauser_renamed; DROP SCHEMA testns CASCADE; -- clean up +-- \c - DROP ROLE regress_schemauser1; DROP ROLE regress_schemauser_renamed; -- test that dependent privileges are revoked (or not) properly +-- \c - set session role regress_priv_user1; create table dep_priv_test (a int); @@ -1755,16 +1789,20 @@ set session role regress_priv_user3; grant select on dep_priv_test to regress_priv_user4 with grant option; set session role regress_priv_user4; grant select on dep_priv_test to regress_priv_user5; +-- \dp dep_priv_test set session role regress_priv_user2; revoke select on dep_priv_test from regress_priv_user4 cascade; +-- \dp dep_priv_test set session role regress_priv_user3; revoke select on dep_priv_test from regress_priv_user4 cascade; +-- \dp dep_priv_test set session role regress_priv_user1; drop table dep_priv_test; -- clean up +-- \c drop sequence x_seq; @@ -1810,6 +1848,13 @@ DROP USER regress_priv_user7; DROP USER regress_priv_user8; -- does not exist +-- leave some default ACLs for pg_upgrade's dump-restore test input. +ALTER DEFAULT PRIVILEGES FOR ROLE pg_signal_backend + REVOKE USAGE ON TYPES FROM pg_signal_backend; +ALTER DEFAULT PRIVILEGES FOR ROLE pg_read_all_settings + REVOKE USAGE ON TYPES FROM pg_read_all_settings; + + -- permissions with LOCK TABLE CREATE USER regress_locktable_user; CREATE TABLE lock_table (a int); @@ -1826,6 +1871,7 @@ ROLLBACK; BEGIN; LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should fail ROLLBACK; +-- \c REVOKE SELECT ON lock_table FROM regress_locktable_user; -- LOCK TABLE and INSERT permission @@ -1840,6 +1886,7 @@ COMMIT; BEGIN; LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should fail ROLLBACK; +-- \c REVOKE INSERT ON lock_table FROM regress_locktable_user; -- LOCK TABLE and UPDATE permission @@ -1854,6 +1901,7 @@ COMMIT; BEGIN; LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should pass COMMIT; +-- \c REVOKE UPDATE ON lock_table FROM regress_locktable_user; -- LOCK TABLE and DELETE permission @@ -1868,6 +1916,7 @@ COMMIT; BEGIN; LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should pass COMMIT; +-- \c REVOKE DELETE ON lock_table FROM regress_locktable_user; -- LOCK TABLE and TRUNCATE permission @@ -1882,6 +1931,7 @@ COMMIT; BEGIN; LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should pass COMMIT; +-- \c REVOKE TRUNCATE ON lock_table FROM regress_locktable_user; -- LOCK TABLE and MAINTAIN permission @@ -1896,6 +1946,7 @@ COMMIT; BEGIN; LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should pass COMMIT; +-- \c REVOKE MAINTAIN ON lock_table FROM regress_locktable_user; -- clean up @@ -1903,9 +1954,11 @@ DROP TABLE lock_table; DROP USER regress_locktable_user; -- test to check privileges of system views pg_shmem_allocations, --- pg_shmem_allocations_numa and pg_backend_memory_contexts. +-- pg_shmem_allocations_numa, pg_dsm_registry_allocations, and +-- pg_backend_memory_contexts. 
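Before the checks that follow, a quick sketch of the pattern they exercise — access to these system views is closed by default and opened by the pg_read_all_stats predefined role (the role name example_stats_reader is hypothetical; the test itself uses regress_readallstats):

-- Hypothetical minimal reproduction of the gating behavior:
CREATE ROLE example_stats_reader;
SELECT has_table_privilege('example_stats_reader', 'pg_shmem_allocations', 'SELECT'); -- expected: false
GRANT pg_read_all_stats TO example_stats_reader;
SELECT has_table_privilege('example_stats_reader', 'pg_shmem_allocations', 'SELECT'); -- expected: true
DROP ROLE example_stats_reader;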
-- switch to superuser +-- \c - CREATE ROLE regress_readallstats; @@ -1913,6 +1966,7 @@ SELECT has_table_privilege('regress_readallstats','pg_aios','SELECT'); -- no SELECT has_table_privilege('regress_readallstats','pg_backend_memory_contexts','SELECT'); -- no SELECT has_table_privilege('regress_readallstats','pg_shmem_allocations','SELECT'); -- no SELECT has_table_privilege('regress_readallstats','pg_shmem_allocations_numa','SELECT'); -- no +SELECT has_table_privilege('regress_readallstats','pg_dsm_registry_allocations','SELECT'); -- no GRANT pg_read_all_stats TO regress_readallstats; @@ -1920,6 +1974,7 @@ SELECT has_table_privilege('regress_readallstats','pg_aios','SELECT'); -- yes SELECT has_table_privilege('regress_readallstats','pg_backend_memory_contexts','SELECT'); -- yes SELECT has_table_privilege('regress_readallstats','pg_shmem_allocations','SELECT'); -- yes SELECT has_table_privilege('regress_readallstats','pg_shmem_allocations_numa','SELECT'); -- yes +SELECT has_table_privilege('regress_readallstats','pg_dsm_registry_allocations','SELECT'); -- yes -- run query to ensure that functions within views can be executed SET ROLE regress_readallstats; diff --git a/crates/squawk_parser/tests/data/regression_suite/publication.sql b/crates/squawk_parser/tests/data/regression_suite/publication.sql index d31ea1aa..12223dea 100644 --- a/crates/squawk_parser/tests/data/regression_suite/publication.sql +++ b/crates/squawk_parser/tests/data/regression_suite/publication.sql @@ -26,10 +26,13 @@ CREATE PUBLICATION testpub_xxx WITH (publish = 'cluster, vacuum'); CREATE PUBLICATION testpub_xxx WITH (publish_via_partition_root = 'true', publish_via_partition_root = '0'); CREATE PUBLICATION testpub_xxx WITH (publish_generated_columns = stored, publish_generated_columns = none); CREATE PUBLICATION testpub_xxx WITH (publish_generated_columns = foo); +CREATE PUBLICATION testpub_xxx WITH (publish_generated_columns); +-- \dRp ALTER PUBLICATION testpub_default SET (publish = 'insert, update, delete'); +-- \dRp --- adding tables CREATE SCHEMA pub_test; @@ -63,10 +66,13 @@ CREATE PUBLICATION testpub_fortable FOR TABLE testpub_tbl1; RESET client_min_messages; -- should be able to add schema to 'FOR TABLE' publication ALTER PUBLICATION testpub_fortable ADD TABLES IN SCHEMA pub_test; +-- \dRp+ testpub_fortable -- should be able to drop schema from 'FOR TABLE' publication ALTER PUBLICATION testpub_fortable DROP TABLES IN SCHEMA pub_test; +-- \dRp+ testpub_fortable -- should be able to set schema to 'FOR TABLE' publication ALTER PUBLICATION testpub_fortable SET TABLES IN SCHEMA pub_test; +-- \dRp+ testpub_fortable SET client_min_messages = 'ERROR'; CREATE PUBLICATION testpub_forschema FOR TABLES IN SCHEMA pub_test; @@ -74,6 +80,7 @@ CREATE PUBLICATION testpub_forschema FOR TABLES IN SCHEMA pub_test; -- schema CREATE PUBLICATION testpub_for_tbl_schema FOR TABLES IN SCHEMA pub_test, TABLE pub_test.testpub_nopk; RESET client_min_messages; +-- \dRp+ testpub_for_tbl_schema -- weird parser corner case CREATE PUBLICATION testpub_parsertst FOR TABLE pub_test.testpub_nopk, CURRENT_SCHEMA; @@ -81,17 +88,22 @@ CREATE PUBLICATION testpub_parsertst FOR TABLES IN SCHEMA foo, test.foo; -- should be able to add a table of the same schema to the schema publication ALTER PUBLICATION testpub_forschema ADD TABLE pub_test.testpub_nopk; +-- \dRp+ testpub_forschema -- should be able to drop the table ALTER PUBLICATION testpub_forschema DROP TABLE pub_test.testpub_nopk; +-- \dRp+ testpub_forschema -- fail - can't drop a table from the 
schema publication which isn't in the -- publication ALTER PUBLICATION testpub_forschema DROP TABLE pub_test.testpub_nopk; -- should be able to set table to schema publication ALTER PUBLICATION testpub_forschema SET TABLE pub_test.testpub_nopk; +-- \dRp+ testpub_forschema SELECT pubname, puballtables FROM pg_publication WHERE pubname = 'testpub_foralltables'; +-- \d+ testpub_tbl2 +-- \dRp+ testpub_foralltables DROP TABLE testpub_tbl2; DROP PUBLICATION testpub_foralltables, testpub_fortable, testpub_forschema, testpub_for_tbl_schema; @@ -102,10 +114,58 @@ SET client_min_messages = 'ERROR'; CREATE PUBLICATION testpub3 FOR TABLE testpub_tbl3; CREATE PUBLICATION testpub4 FOR TABLE ONLY testpub_tbl3; RESET client_min_messages; +-- \dRp+ testpub3 +-- \dRp+ testpub4 DROP TABLE testpub_tbl3, testpub_tbl3a; DROP PUBLICATION testpub3, testpub4; +--- Tests for publications with SEQUENCES +CREATE SEQUENCE regress_pub_seq0; +CREATE SEQUENCE pub_test.regress_pub_seq1; + +-- FOR ALL SEQUENCES +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION regress_pub_forallsequences1 FOR ALL SEQUENCES; +RESET client_min_messages; + +SELECT pubname, puballtables, puballsequences FROM pg_publication WHERE pubname = 'regress_pub_forallsequences1'; +-- \d+ regress_pub_seq0 +-- \dRp+ regress_pub_forallsequences1 + +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION regress_pub_forallsequences2 FOR ALL SEQUENCES; +RESET client_min_messages; + +-- check that describe sequence lists both publications the sequence belongs to +-- \d+ pub_test.regress_pub_seq1 + +--- Specifying both ALL TABLES and ALL SEQUENCES +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION regress_pub_for_allsequences_alltables FOR ALL SEQUENCES, ALL TABLES; + +-- Specifying WITH clause in an ALL SEQUENCES publication will emit a NOTICE. 
+SET client_min_messages = 'NOTICE'; +CREATE PUBLICATION regress_pub_for_allsequences_alltables_withclause FOR ALL SEQUENCES, ALL TABLES WITH (publish = 'insert'); +CREATE PUBLICATION regress_pub_for_allsequences_withclause FOR ALL SEQUENCES WITH (publish_generated_columns = 'stored'); +RESET client_min_messages; + +SELECT pubname, puballtables, puballsequences FROM pg_publication WHERE pubname = 'regress_pub_for_allsequences_alltables'; +-- \dRp+ regress_pub_for_allsequences_alltables + +DROP SEQUENCE regress_pub_seq0, pub_test.regress_pub_seq1; +DROP PUBLICATION regress_pub_forallsequences1; +DROP PUBLICATION regress_pub_forallsequences2; +DROP PUBLICATION regress_pub_for_allsequences_alltables; +DROP PUBLICATION regress_pub_for_allsequences_alltables_withclause; +DROP PUBLICATION regress_pub_for_allsequences_withclause; + +-- fail - Specifying ALL TABLES more than once +CREATE PUBLICATION regress_pub_for_allsequences_alltables FOR ALL SEQUENCES, ALL TABLES, ALL TABLES; + +-- fail - Specifying ALL SEQUENCES more than once +CREATE PUBLICATION regress_pub_for_allsequences_alltables FOR ALL SEQUENCES, ALL TABLES, ALL SEQUENCES; + -- Tests for partitioned tables SET client_min_messages = 'ERROR'; CREATE PUBLICATION testpub_forparted; @@ -120,6 +180,7 @@ ALTER TABLE testpub_parted ATTACH PARTITION testpub_parted2 FOR VALUES IN (2); UPDATE testpub_parted1 SET a = 1; -- only parent is listed as being in publication, not the partition ALTER PUBLICATION testpub_forparted ADD TABLE testpub_parted; +-- \dRp+ testpub_forparted -- works despite missing REPLICA IDENTITY, because no actual update happened UPDATE testpub_parted SET a = 1 WHERE false; -- should now fail, because parent's publication replicates updates @@ -128,6 +189,7 @@ ALTER TABLE testpub_parted DETACH PARTITION testpub_parted1; -- works again, because parent's publication is no longer considered UPDATE testpub_parted1 SET a = 1; ALTER PUBLICATION testpub_forparted SET (publish_via_partition_root = true); +-- \dRp+ testpub_forparted -- still fail, because parent's publication replicates updates UPDATE testpub_parted2 SET a = 2; ALTER PUBLICATION testpub_forparted DROP TABLE testpub_parted; @@ -151,24 +213,34 @@ SET client_min_messages = 'ERROR'; -- validation of referenced columns is less strict than for delete/update. 
CREATE PUBLICATION testpub5 FOR TABLE testpub_rf_tbl1, testpub_rf_tbl2 WHERE (c <> 'test' AND d < 5) WITH (publish = 'insert'); RESET client_min_messages; +-- \dRp+ testpub5 +-- \d testpub_rf_tbl3 ALTER PUBLICATION testpub5 ADD TABLE testpub_rf_tbl3 WHERE (e > 1000 AND e < 2000); +-- \dRp+ testpub5 +-- \d testpub_rf_tbl3 ALTER PUBLICATION testpub5 DROP TABLE testpub_rf_tbl2; +-- \dRp+ testpub5 -- remove testpub_rf_tbl1 and add testpub_rf_tbl3 again (another WHERE expression) ALTER PUBLICATION testpub5 SET TABLE testpub_rf_tbl3 WHERE (e > 300 AND e < 500); +-- \dRp+ testpub5 +-- \d testpub_rf_tbl3 -- test \d (now it displays filter information) SET client_min_messages = 'ERROR'; CREATE PUBLICATION testpub_rf_yes FOR TABLE testpub_rf_tbl1 WHERE (a > 1) WITH (publish = 'insert'); CREATE PUBLICATION testpub_rf_no FOR TABLE testpub_rf_tbl1; RESET client_min_messages; +-- \d testpub_rf_tbl1 DROP PUBLICATION testpub_rf_yes, testpub_rf_no; -- some more syntax tests to exercise other parser pathways SET client_min_messages = 'ERROR'; CREATE PUBLICATION testpub_syntax1 FOR TABLE testpub_rf_tbl1, ONLY testpub_rf_tbl3 WHERE (e < 999) WITH (publish = 'insert'); RESET client_min_messages; +-- \dRp+ testpub_syntax1 DROP PUBLICATION testpub_syntax1; SET client_min_messages = 'ERROR'; CREATE PUBLICATION testpub_syntax2 FOR TABLE testpub_rf_tbl1, testpub_rf_schema1.testpub_rf_tbl5 WHERE (h < 999) WITH (publish = 'insert'); RESET client_min_messages; +-- \dRp+ testpub_syntax2 DROP PUBLICATION testpub_syntax2; -- fail - schemas don't allow WHERE clause SET client_min_messages = 'ERROR'; @@ -235,7 +307,11 @@ CREATE PUBLICATION testpub6 FOR TABLES IN SCHEMA testpub_rf_schema2; -- should be able to set publication with schema and table of the same schema ALTER PUBLICATION testpub6 SET TABLES IN SCHEMA testpub_rf_schema2, TABLE testpub_rf_schema2.testpub_rf_tbl6 WHERE (i < 99); RESET client_min_messages; +-- \dRp+ testpub6 -- fail - virtual generated column uses user-defined function +-- (Actually, this already fails at CREATE TABLE rather than at CREATE +-- PUBLICATION, but let's keep the test in case the former gets +-- relaxed sometime.) 
CREATE TABLE testpub_rf_tbl6 (id int PRIMARY KEY, x int, y int GENERATED ALWAYS AS (x * testpub_rf_func2()) VIRTUAL); CREATE PUBLICATION testpub7 FOR TABLE testpub_rf_tbl6 WHERE (y > 100); -- test that SET EXPRESSION is rejected, because it could affect a row filter @@ -250,7 +326,7 @@ DROP TABLE testpub_rf_tbl2; DROP TABLE testpub_rf_tbl3; DROP TABLE testpub_rf_tbl4; DROP TABLE testpub_rf_tbl5; -DROP TABLE testpub_rf_tbl6; +--DROP TABLE testpub_rf_tbl6; DROP TABLE testpub_rf_schema1.testpub_rf_tbl5; DROP TABLE testpub_rf_schema2.testpub_rf_tbl6; DROP SCHEMA testpub_rf_schema1; @@ -488,6 +564,7 @@ SET client_min_messages = 'ERROR'; CREATE PUBLICATION testpub_table_ins WITH (publish = 'insert, truncate'); RESET client_min_messages; ALTER PUBLICATION testpub_table_ins ADD TABLE testpub_tbl5 (a); -- ok +-- \dRp+ testpub_table_ins -- error: cannot work with deferrable primary keys CREATE TABLE testpub_tbl5d (a int PRIMARY KEY DEFERRABLE); @@ -512,10 +589,13 @@ UPDATE testpub_tbl6 SET a = 1; -- make sure changing the column list is propagated to the catalog CREATE TABLE testpub_tbl7 (a int primary key, b text, c text); ALTER PUBLICATION testpub_fortable ADD TABLE testpub_tbl7 (a, b); +-- \d+ testpub_tbl7 -- ok: the column list is the same, we should skip this table (or at least not fail) ALTER PUBLICATION testpub_fortable SET TABLE testpub_tbl7 (a, b); +-- \d+ testpub_tbl7 -- ok: the column list changes, make sure the catalog gets updated ALTER PUBLICATION testpub_fortable SET TABLE testpub_tbl7 (a, c); +-- \d+ testpub_tbl7 -- column list for partitioned tables has to cover replica identities for -- all child relations @@ -625,6 +705,8 @@ RESET client_min_messages; CREATE TABLE testpub_tbl_both_filters (a int, b int, c int, PRIMARY KEY (a,c)); ALTER TABLE testpub_tbl_both_filters REPLICA IDENTITY USING INDEX testpub_tbl_both_filters_pkey; ALTER PUBLICATION testpub_both_filters ADD TABLE testpub_tbl_both_filters (a,c) WHERE (c != 1); +-- \dRp+ testpub_both_filters +-- \d+ testpub_tbl_both_filters DROP TABLE testpub_tbl_both_filters; DROP PUBLICATION testpub_both_filters; @@ -791,6 +873,7 @@ ALTER PUBLICATION testpub_fortbl ADD TABLE testpub_tbl1; -- fail - already added CREATE PUBLICATION testpub_fortbl FOR TABLE testpub_tbl1; +-- \dRp+ testpub_fortbl -- fail - view ALTER PUBLICATION testpub_default ADD TABLE testpub_view; @@ -801,11 +884,15 @@ ALTER PUBLICATION testpub_default ADD TABLE pub_test.testpub_nopk; ALTER PUBLICATION testpub_ins_trunct ADD TABLE pub_test.testpub_nopk, testpub_tbl1; +-- \d+ pub_test.testpub_nopk +-- \d+ testpub_tbl1 +-- \dRp+ testpub_default ALTER PUBLICATION testpub_default DROP TABLE testpub_tbl1, pub_test.testpub_nopk; -- fail - nonexistent ALTER PUBLICATION testpub_default DROP TABLE pub_test.testpub_nopk; +-- \d+ testpub_tbl1 -- verify relation cache invalidation when a primary key is added using -- an existing index @@ -865,6 +952,7 @@ REVOKE CREATE ON DATABASE regression FROM regress_publication_user2; DROP TABLE testpub_parted; DROP TABLE testpub_tbl1; +-- \dRp+ testpub_default -- fail - must be owner of publication SET ROLE regress_publication_user_dummy; @@ -873,12 +961,14 @@ RESET ROLE; ALTER PUBLICATION testpub_default RENAME TO testpub_foo; +-- \dRp testpub_foo -- rename back to keep the rest simple ALTER PUBLICATION testpub_foo RENAME TO testpub_default; ALTER PUBLICATION testpub_default OWNER TO regress_publication_user2; +-- \dRp testpub_default -- adding schemas and tables CREATE SCHEMA pub_test1; @@ -893,8 +983,10 @@ CREATE TABLE 
"CURRENT_SCHEMA"."CURRENT_SCHEMA"(id int); -- suppress warning that depends on wal_level SET client_min_messages = 'ERROR'; CREATE PUBLICATION testpub1_forschema FOR TABLES IN SCHEMA pub_test1; +-- \dRp+ testpub1_forschema CREATE PUBLICATION testpub2_forschema FOR TABLES IN SCHEMA pub_test1, pub_test2, pub_test3; +-- \dRp+ testpub2_forschema -- check create publication on CURRENT_SCHEMA CREATE PUBLICATION testpub3_forschema FOR TABLES IN SCHEMA CURRENT_SCHEMA; @@ -905,6 +997,11 @@ CREATE PUBLICATION testpub_fortable FOR TABLE "CURRENT_SCHEMA"."CURRENT_SCHEMA"; RESET client_min_messages; +-- \dRp+ testpub3_forschema +-- \dRp+ testpub4_forschema +-- \dRp+ testpub5_forschema +-- \dRp+ testpub6_forschema +-- \dRp+ testpub_fortable -- check create publication on CURRENT_SCHEMA where search_path is not set SET SEARCH_PATH=''; @@ -929,46 +1026,59 @@ CREATE PUBLICATION testpub1_forschema1 FOR TABLES IN SCHEMA testpub_view; -- dropping the schema should reflect the change in publication DROP SCHEMA pub_test3; +-- \dRp+ testpub2_forschema -- renaming the schema should reflect the change in publication ALTER SCHEMA pub_test1 RENAME to pub_test1_renamed; +-- \dRp+ testpub2_forschema ALTER SCHEMA pub_test1_renamed RENAME to pub_test1; +-- \dRp+ testpub2_forschema -- alter publication add schema ALTER PUBLICATION testpub1_forschema ADD TABLES IN SCHEMA pub_test2; +-- \dRp+ testpub1_forschema -- add non existent schema ALTER PUBLICATION testpub1_forschema ADD TABLES IN SCHEMA non_existent_schema; +-- \dRp+ testpub1_forschema -- add a schema which is already added to the publication ALTER PUBLICATION testpub1_forschema ADD TABLES IN SCHEMA pub_test1; +-- \dRp+ testpub1_forschema -- alter publication drop schema ALTER PUBLICATION testpub1_forschema DROP TABLES IN SCHEMA pub_test2; +-- \dRp+ testpub1_forschema -- drop schema that is not present in the publication ALTER PUBLICATION testpub1_forschema DROP TABLES IN SCHEMA pub_test2; +-- \dRp+ testpub1_forschema -- drop a schema that does not exist in the system ALTER PUBLICATION testpub1_forschema DROP TABLES IN SCHEMA non_existent_schema; +-- \dRp+ testpub1_forschema -- drop all schemas ALTER PUBLICATION testpub1_forschema DROP TABLES IN SCHEMA pub_test1; +-- \dRp+ testpub1_forschema -- alter publication set multiple schema ALTER PUBLICATION testpub1_forschema SET TABLES IN SCHEMA pub_test1, pub_test2; +-- \dRp+ testpub1_forschema -- alter publication set non-existent schema ALTER PUBLICATION testpub1_forschema SET TABLES IN SCHEMA non_existent_schema; +-- \dRp+ testpub1_forschema -- alter publication set it duplicate schemas should set the schemas after -- removing the duplicate schemas ALTER PUBLICATION testpub1_forschema SET TABLES IN SCHEMA pub_test1, pub_test1; +-- \dRp+ testpub1_forschema -- Verify that it fails to add a schema with a column specification -- ALTER PUBLICATION testpub1_forschema ADD TABLES IN SCHEMA foo (a, b); --- ALTER PUBLICATION testpub1_forschema ADD TABLES IN SCHEMA foo, bar (a, b); +ALTER PUBLICATION testpub1_forschema ADD TABLES IN SCHEMA foo, bar (a, b); -- cleanup pub_test1 schema for invalidation tests ALTER PUBLICATION testpub2_forschema DROP TABLES IN SCHEMA pub_test1; @@ -1031,7 +1141,9 @@ UPDATE pub_testpart1.child_parent2 set a = 1; SET client_min_messages = 'ERROR'; CREATE PUBLICATION testpub3_forschema; RESET client_min_messages; +-- \dRp+ testpub3_forschema ALTER PUBLICATION testpub3_forschema SET TABLES IN SCHEMA pub_test1; +-- \dRp+ testpub3_forschema -- create publication including both 'FOR TABLE' and 'FOR TABLES 
IN SCHEMA' SET client_min_messages = 'ERROR'; @@ -1039,6 +1151,8 @@ CREATE PUBLICATION testpub_forschema_fortable FOR TABLES IN SCHEMA pub_test1, TA CREATE PUBLICATION testpub_fortable_forschema FOR TABLE pub_test2.tbl1, TABLES IN SCHEMA pub_test1; RESET client_min_messages; +-- \dRp+ testpub_forschema_fortable +-- \dRp+ testpub_fortable_forschema -- fail specifying table without any of 'FOR TABLES IN SCHEMA' or --'FOR TABLE' or 'FOR ALL TABLES' @@ -1116,16 +1230,15 @@ DROP SCHEMA sch2 cascade; -- ====================================================== -- Test the 'publish_generated_columns' parameter with the following values: --- 'stored', 'none', and the default (no value specified), which defaults to --- 'stored'. +-- 'stored', 'none'. SET client_min_messages = 'ERROR'; CREATE PUBLICATION pub1 FOR ALL TABLES WITH (publish_generated_columns = stored); +-- \dRp+ pub1 CREATE PUBLICATION pub2 FOR ALL TABLES WITH (publish_generated_columns = none); -CREATE PUBLICATION pub3 FOR ALL TABLES WITH (publish_generated_columns); +-- \dRp+ pub2 DROP PUBLICATION pub1; DROP PUBLICATION pub2; -DROP PUBLICATION pub3; -- Test the 'publish_generated_columns' parameter as 'none' and 'stored' for -- different scenarios with/without generated columns in column lists. @@ -1133,24 +1246,109 @@ CREATE TABLE gencols (a int, gen1 int GENERATED ALWAYS AS (a * 2) STORED); -- Generated columns in column list, when 'publish_generated_columns'='none' CREATE PUBLICATION pub1 FOR table gencols(a, gen1) WITH (publish_generated_columns = none); +-- \dRp+ pub1 -- Generated columns in column list, when 'publish_generated_columns'='stored' CREATE PUBLICATION pub2 FOR table gencols(a, gen1) WITH (publish_generated_columns = stored); +-- \dRp+ pub2 -- Generated columns in column list, then set 'publish_generated_columns'='none' ALTER PUBLICATION pub2 SET (publish_generated_columns = none); +-- \dRp+ pub2 -- Remove generated columns from column list, when 'publish_generated_columns'='none' ALTER PUBLICATION pub2 SET TABLE gencols(a); +-- \dRp+ pub2 -- Add generated columns in column list, when 'publish_generated_columns'='none' ALTER PUBLICATION pub2 SET TABLE gencols(a, gen1); +-- \dRp+ pub2 DROP PUBLICATION pub1; DROP PUBLICATION pub2; DROP TABLE gencols; RESET client_min_messages; + +-- Test that the INSERT ON CONFLICT command correctly checks REPLICA IDENTITY +-- when the target table is published. +CREATE TABLE testpub_insert_onconfl_no_ri (a int unique, b int); +CREATE TABLE testpub_insert_onconfl_parted (a int unique, b int) PARTITION by RANGE (a); +CREATE TABLE testpub_insert_onconfl_part_no_ri PARTITION OF testpub_insert_onconfl_parted FOR VALUES FROM (1) TO (10); + +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION pub1 FOR ALL TABLES; +RESET client_min_messages; + +-- fail - missing REPLICA IDENTITY +INSERT INTO testpub_insert_onconfl_no_ri VALUES (1, 1) ON CONFLICT (a) DO UPDATE SET b = 2; + +-- ok - no updates +INSERT INTO testpub_insert_onconfl_no_ri VALUES (1, 1) ON CONFLICT DO NOTHING; + +-- fail - missing REPLICA IDENTITY in partition testpub_insert_onconfl_no_ri +INSERT INTO testpub_insert_onconfl_parted VALUES (1, 1) ON CONFLICT (a) DO UPDATE SET b = 2; + +-- ok - no updates +INSERT INTO testpub_insert_onconfl_parted VALUES (1, 1) ON CONFLICT DO NOTHING; + +DROP PUBLICATION pub1; +DROP TABLE testpub_insert_onconfl_no_ri; +DROP TABLE testpub_insert_onconfl_parted; + +-- Test that the MERGE command correctly checks REPLICA IDENTITY when the +-- target table is published. 
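+-- (Editor's note: the UPDATE and DELETE actions of MERGE are subject to the
+-- same replica-identity requirement as plain UPDATE/DELETE on a published
+-- table, while INSERT and DO NOTHING actions are not restricted; the cases
+-- below exercise each of these outcomes.)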
+CREATE TABLE testpub_merge_no_ri (a int, b int); +CREATE TABLE testpub_merge_pk (a int primary key, b int); + +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION pub1 FOR ALL TABLES; +RESET client_min_messages; + +-- fail - missing REPLICA IDENTITY +MERGE INTO testpub_merge_no_ri USING testpub_merge_pk s ON s.a >= 1 + WHEN MATCHED THEN UPDATE SET b = s.b; + +-- fail - missing REPLICA IDENTITY +MERGE INTO testpub_merge_no_ri USING testpub_merge_pk s ON s.a >= 1 + WHEN MATCHED THEN DELETE; + +-- ok - insert and do nothing are not restricted +MERGE INTO testpub_merge_no_ri USING testpub_merge_pk s ON s.a >= 1 + WHEN MATCHED THEN DO NOTHING + WHEN NOT MATCHED THEN INSERT (a, b) VALUES (0, 0); + +-- ok - REPLICA IDENTITY is DEFAULT and table has a PK +MERGE INTO testpub_merge_pk USING testpub_merge_no_ri s ON s.a >= 1 + WHEN MATCHED AND s.a > 0 THEN UPDATE SET b = s.b + WHEN MATCHED THEN DELETE; + +DROP PUBLICATION pub1; +DROP TABLE testpub_merge_no_ri; +DROP TABLE testpub_merge_pk; + RESET SESSION AUTHORIZATION; DROP ROLE regress_publication_user, regress_publication_user2; DROP ROLE regress_publication_user_dummy; + +-- stage objects for pg_dump tests +CREATE SCHEMA pubme CREATE TABLE t0 (c int, d int) CREATE TABLE t1 (c int); +CREATE SCHEMA pubme2 CREATE TABLE t0 (c int, d int); +SET client_min_messages = 'ERROR'; +CREATE PUBLICATION dump_pub_qual_1ct FOR + TABLE ONLY pubme.t0 (c, d) WHERE (c > 0); +CREATE PUBLICATION dump_pub_qual_2ct FOR + TABLE ONLY pubme.t0 (c) WHERE (c > 0), + TABLE ONLY pubme.t1 (c); +CREATE PUBLICATION dump_pub_nsp_1ct FOR + TABLES IN SCHEMA pubme; +CREATE PUBLICATION dump_pub_nsp_2ct FOR + TABLES IN SCHEMA pubme, + TABLES IN SCHEMA pubme2; +CREATE PUBLICATION dump_pub_all FOR + TABLE ONLY pubme.t0, + TABLE ONLY pubme.t1 WHERE (c < 0), + TABLES IN SCHEMA pubme, + TABLES IN SCHEMA pubme2 + WITH (publish_via_partition_root = true); +RESET client_min_messages; diff --git a/crates/squawk_parser/tests/data/regression_suite/random.sql b/crates/squawk_parser/tests/data/regression_suite/random.sql index ebfa7539..890f1468 100644 --- a/crates/squawk_parser/tests/data/regression_suite/random.sql +++ b/crates/squawk_parser/tests/data/regression_suite/random.sql @@ -277,3 +277,29 @@ SELECT random(-1e30, 1e30) FROM generate_series(1, 10); SELECT random(-0.4, 0.4) FROM generate_series(1, 10); SELECT random(0, 1 - 1e-30) FROM generate_series(1, 10); SELECT n, random(0, trim_scale(abs(1 - 10.0^(-n)))) FROM generate_series(-20, 20) n; + +-- random dates +SELECT random('1979-02-08'::date,'2025-07-03'::date) AS random_date_multiple_years; +SELECT random('4714-11-24 BC'::date,'5874897-12-31 AD'::date) AS random_date_maximum_range; +SELECT random('1979-02-08'::date,'1979-02-08'::date) AS random_date_empty_range; +SELECT random('2024-12-31'::date, '2024-01-01'::date); -- fail +SELECT random('-infinity'::date, '2024-01-01'::date); -- fail +SELECT random('2024-12-31'::date, 'infinity'::date); -- fail + +-- random timestamps +SELECT random('1979-02-08'::timestamp,'2025-07-03'::timestamp) AS random_timestamp_multiple_years; +SELECT random('4714-11-24 BC'::timestamp,'294276-12-31 23:59:59.999999'::timestamp) AS random_timestamp_maximum_range; +SELECT random('2024-07-01 12:00:00.000001'::timestamp, '2024-07-01 12:00:00.999999'::timestamp) AS random_narrow_range; +SELECT random('1979-02-08'::timestamp,'1979-02-08'::timestamp) AS random_timestamp_empty_range; +SELECT random('2024-12-31'::timestamp, '2024-01-01'::timestamp); -- fail +SELECT random('-infinity'::timestamp, 
'2024-01-01'::timestamp); -- fail +SELECT random('2024-12-31'::timestamp, 'infinity'::timestamp); -- fail + +-- random timestamps with timezone +SELECT random('1979-02-08 +01'::timestamptz,'2025-07-03 +02'::timestamptz) AS random_timestamptz_multiple_years; +SELECT random('4714-11-24 BC +00'::timestamptz,'294276-12-31 23:59:59.999999 +00'::timestamptz) AS random_timestamptz_maximum_range; +SELECT random('2024-07-01 12:00:00.000001 +04'::timestamptz, '2024-07-01 12:00:00.999999 +04'::timestamptz) AS random_timestamptz_narrow_range; +SELECT random('1979-02-08 +05'::timestamptz,'1979-02-08 +05'::timestamptz) AS random_timestamptz_empty_range; +SELECT random('2024-01-01 +06'::timestamptz, '2024-01-01 +07'::timestamptz); -- fail +SELECT random('-infinity'::timestamptz, '2024-01-01 +07'::timestamptz); -- fail +SELECT random('2024-01-01 +06'::timestamptz, 'infinity'::timestamptz); -- fail diff --git a/crates/squawk_parser/tests/data/regression_suite/rangetypes.sql b/crates/squawk_parser/tests/data/regression_suite/rangetypes.sql index a5ecdf53..5c4b0337 100644 --- a/crates/squawk_parser/tests/data/regression_suite/rangetypes.sql +++ b/crates/squawk_parser/tests/data/regression_suite/rangetypes.sql @@ -107,6 +107,16 @@ select numrange(1.1, 2.2,'[]') - numrange(2.0, 3.0); select range_minus(numrange(10.1,12.2,'[]'), numrange(110.0,120.2,'(]')); select range_minus(numrange(10.1,12.2,'[]'), numrange(0.0,120.2,'(]')); +select range_minus_multi('empty'::numrange, numrange(2.0, 3.0)); +select range_minus_multi(numrange(1.1, 2.2), 'empty'::numrange); +select range_minus_multi(numrange(1.1, 2.2), numrange(2.0, 3.0)); +select range_minus_multi(numrange(1.1, 2.2), numrange(2.2, 3.0)); +select range_minus_multi(numrange(1.1, 2.2,'[]'), numrange(2.0, 3.0)); +select range_minus_multi(numrange(1.0, 3.0), numrange(1.5, 2.0)); +select range_minus_multi(numrange(10.1,12.2,'[]'), numrange(110.0,120.2,'(]')); +select range_minus_multi(numrange(10.1,12.2,'[]'), numrange(0.0,120.2,'(]')); +select range_minus_multi(numrange(1.0,3.0,'[]'), numrange(1.5,2.0,'(]')); + select numrange(4.5, 5.5, '[]') && numrange(5.5, 6.5); select numrange(1.0, 2.0) << numrange(3.0, 4.0); select numrange(1.0, 3.0,'[]') << numrange(3.0, 4.0,'[]'); diff --git a/crates/squawk_parser/tests/data/regression_suite/regproc.sql b/crates/squawk_parser/tests/data/regression_suite/regproc.sql index 98ef1980..be3a49e8 100644 --- a/crates/squawk_parser/tests/data/regression_suite/regproc.sql +++ b/crates/squawk_parser/tests/data/regression_suite/regproc.sql @@ -47,11 +47,42 @@ SELECT regrole('regress_regrole_test'); SELECT regrole('"regress_regrole_test"'); SELECT regnamespace('pg_catalog'); SELECT regnamespace('"pg_catalog"'); +SELECT regdatabase('template1'); +SELECT regdatabase('"template1"'); SELECT to_regrole('regress_regrole_test'); SELECT to_regrole('"regress_regrole_test"'); SELECT to_regnamespace('pg_catalog'); SELECT to_regnamespace('"pg_catalog"'); +SELECT to_regdatabase('template1'); +SELECT to_regdatabase('"template1"'); + +-- special "single dash" case + +SELECT regproc('-')::oid; +SELECT regprocedure('-')::oid; +SELECT regclass('-')::oid; +SELECT regcollation('-')::oid; +SELECT regtype('-')::oid; +SELECT regconfig('-')::oid; +SELECT regdictionary('-')::oid; +SELECT regrole('-')::oid; +SELECT regnamespace('-')::oid; +SELECT regdatabase('-')::oid; + +SELECT to_regproc('-')::oid; +SELECT to_regprocedure('-')::oid; +SELECT to_regclass('-')::oid; +SELECT to_regcollation('-')::oid; +SELECT to_regtype('-')::oid; +SELECT to_regrole('-')::oid; 
+SELECT to_regnamespace('-')::oid; +SELECT to_regdatabase('-')::oid; + +-- constant cannot be used here + +CREATE TABLE regrole_test (rolid OID DEFAULT 'regress_regrole_test'::regrole); +CREATE TABLE regdatabase_test (datid OID DEFAULT 'template1'::regdatabase); /* If objects don't exist, raise errors. */ @@ -74,7 +105,9 @@ SELECT regproc('ng_catalog.now'); SELECT regprocedure('ng_catalog.abs(numeric)'); SELECT regclass('ng_catalog.pg_class'); SELECT regtype('ng_catalog.int4'); +-- \set VERBOSITY sqlstate \\ -- error message is encoding-dependent SELECT regcollation('ng_catalog."POSIX"'); +-- \set VERBOSITY default -- schemaname not applicable @@ -86,6 +119,9 @@ SELECT regrole('foo.bar'); SELECT regnamespace('Nonexistent'); SELECT regnamespace('"Nonexistent"'); SELECT regnamespace('foo.bar'); +SELECT regdatabase('Nonexistent'); +SELECT regdatabase('"Nonexistent"'); +SELECT regdatabase('foo.bar'); /* If objects don't exist, return NULL with no error. */ @@ -120,6 +156,9 @@ SELECT to_regrole('foo.bar'); SELECT to_regnamespace('Nonexistent'); SELECT to_regnamespace('"Nonexistent"'); SELECT to_regnamespace('foo.bar'); +SELECT to_regdatabase('Nonexistent'); +SELECT to_regdatabase('"Nonexistent"'); +SELECT to_regdatabase('foo.bar'); -- Test to_regtypemod SELECT to_regtypemod('text'); @@ -145,6 +184,7 @@ SELECT * FROM pg_input_error_info('ng_catalog.abs(numeric)', 'regprocedure'); SELECT * FROM pg_input_error_info('ng_catalog.abs(numeric', 'regprocedure'); SELECT * FROM pg_input_error_info('regress_regrole_test', 'regrole'); SELECT * FROM pg_input_error_info('no_such_type', 'regtype'); +SELECT * FROM pg_input_error_info('Nonexistent', 'regdatabase'); -- Some cases that should be soft errors, but are not yet SELECT * FROM pg_input_error_info('incorrect type name syntax', 'regtype'); diff --git a/crates/squawk_parser/tests/data/regression_suite/reloptions.sql b/crates/squawk_parser/tests/data/regression_suite/reloptions.sql index 56d49a50..8946b465 100644 --- a/crates/squawk_parser/tests/data/regression_suite/reloptions.sql +++ b/crates/squawk_parser/tests/data/regression_suite/reloptions.sql @@ -89,7 +89,7 @@ DROP TABLE reloptions_test; CREATE TABLE reloptions_test (s VARCHAR) WITH (toast.autovacuum_vacuum_cost_delay = 23); SELECT reltoastrelid as toast_oid - FROM pg_class WHERE oid = 'reloptions_test'::regclass ; + FROM pg_class WHERE oid = 'reloptions_test'::regclass /* \gset */; SELECT reloptions FROM pg_class WHERE oid = 'toast_oid'; ALTER TABLE reloptions_test SET (toast.autovacuum_vacuum_cost_delay = 24); diff --git a/crates/squawk_parser/tests/data/regression_suite/replica_identity.sql b/crates/squawk_parser/tests/data/regression_suite/replica_identity.sql index 52f6351e..cceb0bf7 100644 --- a/crates/squawk_parser/tests/data/regression_suite/replica_identity.sql +++ b/crates/squawk_parser/tests/data/regression_suite/replica_identity.sql @@ -53,6 +53,7 @@ SELECT relreplident FROM pg_class WHERE oid = 'test_replica_identity'::regclass; -- succeed, primary key ALTER TABLE test_replica_identity REPLICA IDENTITY USING INDEX test_replica_identity_pkey; SELECT relreplident FROM pg_class WHERE oid = 'test_replica_identity'::regclass; +-- \d test_replica_identity -- succeed, nondeferrable unique constraint over nonnullable cols ALTER TABLE test_replica_identity REPLICA IDENTITY USING INDEX test_replica_identity_unique_nondefer; @@ -61,6 +62,7 @@ ALTER TABLE test_replica_identity REPLICA IDENTITY USING INDEX test_replica_iden ALTER TABLE test_replica_identity REPLICA IDENTITY USING INDEX 
test_replica_identity_keyab_key; ALTER TABLE test_replica_identity REPLICA IDENTITY USING INDEX test_replica_identity_keyab_key; SELECT relreplident FROM pg_class WHERE oid = 'test_replica_identity'::regclass; +-- \d test_replica_identity SELECT count(*) FROM pg_index WHERE indrelid = 'test_replica_identity'::regclass AND indisreplident; ---- @@ -72,6 +74,7 @@ SELECT count(*) FROM pg_index WHERE indrelid = 'test_replica_identity'::regclass ALTER TABLE test_replica_identity REPLICA IDENTITY FULL; SELECT relreplident FROM pg_class WHERE oid = 'test_replica_identity'::regclass; +-- \d+ test_replica_identity ALTER TABLE test_replica_identity REPLICA IDENTITY NOTHING; SELECT relreplident FROM pg_class WHERE oid = 'test_replica_identity'::regclass; @@ -82,13 +85,17 @@ SELECT relreplident FROM pg_class WHERE oid = 'test_replica_identity'::regclass; -- constraint variant CREATE TABLE test_replica_identity2 (id int UNIQUE NOT NULL); ALTER TABLE test_replica_identity2 REPLICA IDENTITY USING INDEX test_replica_identity2_id_key; +-- \d test_replica_identity2 ALTER TABLE test_replica_identity2 ALTER COLUMN id TYPE bigint; +-- \d test_replica_identity2 -- straight index variant CREATE TABLE test_replica_identity3 (id int NOT NULL); CREATE UNIQUE INDEX test_replica_identity3_id_key ON test_replica_identity3 (id); ALTER TABLE test_replica_identity3 REPLICA IDENTITY USING INDEX test_replica_identity3_id_key; +-- \d test_replica_identity3 ALTER TABLE test_replica_identity3 ALTER COLUMN id TYPE bigint; +-- \d test_replica_identity3 -- ALTER TABLE DROP NOT NULL is not allowed for columns part of an index -- used as replica identity. @@ -111,8 +118,10 @@ ALTER TABLE ONLY test_replica_identity4 REPLICA IDENTITY USING INDEX test_replica_identity4_pkey; ALTER TABLE ONLY test_replica_identity4_1 ADD CONSTRAINT test_replica_identity4_1_pkey PRIMARY KEY (id); +-- \d+ test_replica_identity4 ALTER INDEX test_replica_identity4_pkey ATTACH PARTITION test_replica_identity4_1_pkey; +-- \d+ test_replica_identity4 -- Dropping the primary key is not allowed if that would leave the replica -- identity as nullable diff --git a/crates/squawk_parser/tests/data/regression_suite/returning.sql b/crates/squawk_parser/tests/data/regression_suite/returning.sql index befb1bff..7f5d87ab 100644 --- a/crates/squawk_parser/tests/data/regression_suite/returning.sql +++ b/crates/squawk_parser/tests/data/regression_suite/returning.sql @@ -195,10 +195,10 @@ INSERT INTO foo VALUES (1, 'xxx', 10, 20), (2, 'more', 42, 141), (3, 'zoo2', 57, -- Error cases -- INSERT INTO foo DEFAULT VALUES RETURNING WITH (nonsuch AS something) *; --- INSERT INTO foo DEFAULT VALUES RETURNING WITH (new AS foo) *; --- INSERT INTO foo DEFAULT VALUES RETURNING WITH (old AS o, new AS n, old AS o) *; --- INSERT INTO foo DEFAULT VALUES RETURNING WITH (old AS o, new AS n, new AS n) *; --- INSERT INTO foo DEFAULT VALUES RETURNING WITH (old AS x, new AS x) *; +INSERT INTO foo DEFAULT VALUES RETURNING WITH (new AS foo) *; +INSERT INTO foo DEFAULT VALUES RETURNING WITH (old AS o, new AS n, old AS o) *; +INSERT INTO foo DEFAULT VALUES RETURNING WITH (old AS o, new AS n, new AS n) *; +INSERT INTO foo DEFAULT VALUES RETURNING WITH (old AS x, new AS x) *; -- INSERT has NEW, but not OLD EXPLAIN (verbose, costs off) @@ -406,4 +406,5 @@ BEGIN ATOMIC (SELECT count(*) FROM foo WHERE foo = n); END; +-- \sf foo_update DROP FUNCTION foo_update; diff --git a/crates/squawk_parser/tests/data/regression_suite/rowsecurity.sql b/crates/squawk_parser/tests/data/regression_suite/rowsecurity.sql 
index 98347ba2..f19eb4e5 100644 --- a/crates/squawk_parser/tests/data/regression_suite/rowsecurity.sql +++ b/crates/squawk_parser/tests/data/regression_suite/rowsecurity.sql @@ -41,6 +41,145 @@ CREATE OR REPLACE FUNCTION f_leak(text) RETURNS bool AS 'BEGIN RAISE NOTICE ''f_leak => %'', $1; RETURN true; END'; GRANT EXECUTE ON FUNCTION f_leak(text) TO public; +-- +-- Test policies applied by command type +-- +SET SESSION AUTHORIZATION regress_rls_alice; + +-- setup source table (for MERGE operations) +CREATE TABLE rls_test_src (a int PRIMARY KEY, b text); +ALTER TABLE rls_test_src ENABLE ROW LEVEL SECURITY; +GRANT SELECT, UPDATE ON rls_test_src TO public; +INSERT INTO rls_test_src VALUES (1, 'src a'); + +-- setup target table with a column set by a BEFORE ROW trigger +-- (policies should always see values set by the trigger) +CREATE TABLE rls_test_tgt (a int PRIMARY KEY, b text, c text); +ALTER TABLE rls_test_tgt ENABLE ROW LEVEL SECURITY; +GRANT SELECT, INSERT, UPDATE, DELETE, TRUNCATE ON rls_test_tgt TO public; + +CREATE FUNCTION rls_test_tgt_set_c() RETURNS trigger AS + $$ BEGIN new.c = upper(new.b); RETURN new; END; $$ + LANGUAGE plpgsql; +CREATE TRIGGER rls_test_tgt_set_c BEFORE INSERT OR UPDATE ON rls_test_tgt + FOR EACH ROW EXECUTE FUNCTION rls_test_tgt_set_c(); + +-- setup a complete set of policies that emit NOTICE messages when applied +CREATE FUNCTION rls_test_policy_fn(text, record) RETURNS bool AS + $$ BEGIN RAISE NOTICE '%.%', $1, $2; RETURN true; END; $$ + LANGUAGE plpgsql; + +CREATE POLICY sel_pol ON rls_test_src FOR SELECT + USING (rls_test_policy_fn('SELECT USING on rls_test_src', rls_test_src)); +CREATE POLICY upd_pol ON rls_test_src FOR UPDATE + USING (rls_test_policy_fn('UPDATE USING on rls_test_src', rls_test_src)) + WITH CHECK (rls_test_policy_fn('UPDATE CHECK on rls_test_src', rls_test_src)); + +CREATE POLICY sel_pol ON rls_test_tgt FOR SELECT + USING (rls_test_policy_fn('SELECT USING on rls_test_tgt', rls_test_tgt)); +CREATE POLICY ins_pol ON rls_test_tgt FOR INSERT + WITH CHECK (rls_test_policy_fn('INSERT CHECK on rls_test_tgt', rls_test_tgt)); +CREATE POLICY upd_pol ON rls_test_tgt FOR UPDATE + USING (rls_test_policy_fn('UPDATE USING on rls_test_tgt', rls_test_tgt)) + WITH CHECK (rls_test_policy_fn('UPDATE CHECK on rls_test_tgt', rls_test_tgt)); +CREATE POLICY del_pol ON rls_test_tgt FOR DELETE + USING (rls_test_policy_fn('DELETE USING on rls_test_tgt', rls_test_tgt)); + +-- test policies applied to regress_rls_bob +SET SESSION AUTHORIZATION regress_rls_bob; + +-- SELECT, COPY ... TO, and TABLE should only apply SELECT USING policy clause +SELECT * FROM rls_test_src; +COPY rls_test_src TO stdout; +TABLE rls_test_src; + +-- SELECT ... FOR UPDATE/SHARE should also apply UPDATE USING policy clause +SELECT * FROM rls_test_src FOR UPDATE; +SELECT * FROM rls_test_src FOR NO KEY UPDATE; +SELECT * FROM rls_test_src FOR SHARE; +SELECT * FROM rls_test_src FOR KEY SHARE; + +-- plain INSERT should apply INSERT CHECK policy clause +INSERT INTO rls_test_tgt VALUES (1, 'tgt a'); + +-- INSERT ... 
RETURNING should also apply SELECT USING policy clause +TRUNCATE rls_test_tgt; +INSERT INTO rls_test_tgt VALUES (1, 'tgt a') RETURNING *; + +-- UPDATE without WHERE or RETURNING should only apply UPDATE policy clauses +UPDATE rls_test_tgt SET b = 'tgt b'; + +-- UPDATE with WHERE or RETURNING should also apply SELECT USING policy clause +-- (to both old and new values) +UPDATE rls_test_tgt SET b = 'tgt c' WHERE a = 1; +UPDATE rls_test_tgt SET b = 'tgt d' RETURNING *; + +-- DELETE without WHERE or RETURNING should only apply DELETE USING policy clause +BEGIN; DELETE FROM rls_test_tgt; ROLLBACK; + +-- DELETE with WHERE or RETURNING should also apply SELECT USING policy clause +BEGIN; DELETE FROM rls_test_tgt WHERE a = 1; ROLLBACK; +DELETE FROM rls_test_tgt RETURNING *; + +-- INSERT ... ON CONFLICT DO NOTHING should apply INSERT CHECK and SELECT USING +-- policy clauses (to new value, whether it conflicts or not) +INSERT INTO rls_test_tgt VALUES (1, 'tgt a') ON CONFLICT (a) DO NOTHING; +INSERT INTO rls_test_tgt VALUES (1, 'tgt b') ON CONFLICT (a) DO NOTHING; + +-- INSERT ... ON CONFLICT DO NOTHING without an arbiter clause only applies +-- INSERT CHECK policy clause +INSERT INTO rls_test_tgt VALUES (1, 'tgt b') ON CONFLICT DO NOTHING; + +-- INSERT ... ON CONFLICT DO UPDATE should apply INSERT CHECK and SELECT USING +-- policy clauses to values proposed for insert. In the event of a conflict it +-- should also apply UPDATE and SELECT policies to old and new values, like +-- UPDATE ... WHERE. +BEGIN; +INSERT INTO rls_test_tgt VALUES (2, 'tgt a') ON CONFLICT (a) DO UPDATE SET b = 'tgt b'; +INSERT INTO rls_test_tgt VALUES (2, 'tgt c') ON CONFLICT (a) DO UPDATE SET b = 'tgt d'; +INSERT INTO rls_test_tgt VALUES (3, 'tgt a') ON CONFLICT (a) DO UPDATE SET b = 'tgt b' RETURNING *; +INSERT INTO rls_test_tgt VALUES (3, 'tgt c') ON CONFLICT (a) DO UPDATE SET b = 'tgt d' RETURNING *; +ROLLBACK; + +-- MERGE should always apply SELECT USING policy clauses to both source and +-- target rows +MERGE INTO rls_test_tgt t USING rls_test_src s ON t.a = s.a + WHEN NOT MATCHED THEN DO NOTHING; + +-- MERGE ... INSERT should behave like INSERT on target table +-- (SELECT policy applied to target, if RETURNING is specified) +TRUNCATE rls_test_tgt; +MERGE INTO rls_test_tgt t USING rls_test_src s ON t.a = s.a + WHEN NOT MATCHED THEN INSERT VALUES (1, 'tgt a'); +TRUNCATE rls_test_tgt; +MERGE INTO rls_test_tgt t USING rls_test_src s ON t.a = s.a + WHEN NOT MATCHED THEN INSERT VALUES (1, 'tgt a') + RETURNING *; + +-- MERGE ... UPDATE should behave like UPDATE ... WHERE on target table +-- (join clause is like WHERE, so SELECT policies are always applied) +MERGE INTO rls_test_tgt t USING rls_test_src s ON t.a = s.a + WHEN MATCHED THEN UPDATE SET b = 'tgt b'; +MERGE INTO rls_test_tgt t USING rls_test_src s ON t.a = s.a + WHEN MATCHED THEN UPDATE SET b = 'tgt c' + RETURNING *; + +-- MERGE ... DELETE should behave like DELETE ... 
WHERE on target table +-- (join clause is like WHERE, so SELECT policies are always applied) +BEGIN; +MERGE INTO rls_test_tgt t USING rls_test_src s ON t.a = s.a + WHEN MATCHED THEN DELETE; +ROLLBACK; +MERGE INTO rls_test_tgt t USING rls_test_src s ON t.a = s.a + WHEN MATCHED THEN DELETE + RETURNING *; + +-- Tidy up +RESET SESSION AUTHORIZATION; +DROP TABLE rls_test_src, rls_test_tgt; +DROP FUNCTION rls_test_tgt_set_c; +DROP FUNCTION rls_test_policy_fn; + -- BASIC Row-Level Security Scenario SET SESSION AUTHORIZATION regress_rls_alice; @@ -107,6 +246,8 @@ CREATE POLICY p2r ON document AS RESTRICTIVE TO regress_rls_dave CREATE POLICY p1r ON document AS RESTRICTIVE TO regress_rls_dave USING (cid <> 44); +-- \dp +-- \d document SELECT * FROM pg_policies WHERE schemaname = 'regress_rls_schema' AND tablename = 'document' ORDER BY policyname; -- viewpoint from regress_rls_bob @@ -234,15 +375,32 @@ CREATE TABLE t1 (id int not null primary key, a int, junk1 text, b text); ALTER TABLE t1 DROP COLUMN junk1; -- just a disturbing factor GRANT ALL ON t1 TO public; +COPY t1 FROM stdin WITH ; +-- 101 1 aba +-- 102 2 bbb +-- 103 3 ccc +-- 104 4 dad +-- \. CREATE TABLE t2 (c float) INHERITS (t1); GRANT ALL ON t2 TO public; +COPY t2 FROM stdin; +-- 201 1 abc 1.1 +-- 202 2 bcd 2.2 +-- 203 3 cde 3.3 +-- 204 4 def 4.4 +-- \. CREATE TABLE t3 (id int not null primary key, c text, b text, a int); ALTER TABLE t3 INHERIT t1; GRANT ALL ON t3 TO public; +COPY t3(id, a,b,c) FROM stdin; +-- 301 1 xxx X +-- 302 2 yyy Y +-- 303 3 zzz Z +-- \. CREATE POLICY p1 ON t1 FOR ALL TO PUBLIC USING (a % 2 = 0); -- be even number CREATE POLICY p2 ON t2 FOR ALL TO PUBLIC USING (a % 2 = 1); -- be odd number @@ -336,22 +494,26 @@ CREATE POLICY pp1 ON part_document AS PERMISSIVE CREATE POLICY pp1r ON part_document AS RESTRICTIVE TO regress_rls_dave USING (cid < 55); +-- \d+ part_document SELECT * FROM pg_policies WHERE schemaname = 'regress_rls_schema' AND tablename like '%part_document%' ORDER BY policyname; -- viewpoint from regress_rls_bob SET SESSION AUTHORIZATION regress_rls_bob; SET row_security TO ON; SELECT * FROM part_document WHERE f_leak(dtitle) ORDER BY did; +COPY part_document TO stdout WITH (DELIMITER ','); EXPLAIN (COSTS OFF) SELECT * FROM part_document WHERE f_leak(dtitle); -- viewpoint from regress_rls_carol SET SESSION AUTHORIZATION regress_rls_carol; SELECT * FROM part_document WHERE f_leak(dtitle) ORDER BY did; +COPY part_document TO stdout WITH (DELIMITER ','); EXPLAIN (COSTS OFF) SELECT * FROM part_document WHERE f_leak(dtitle); -- viewpoint from regress_rls_dave SET SESSION AUTHORIZATION regress_rls_dave; SELECT * FROM part_document WHERE f_leak(dtitle) ORDER BY did; +COPY part_document TO stdout WITH (DELIMITER ','); EXPLAIN (COSTS OFF) SELECT * FROM part_document WHERE f_leak(dtitle); -- pp1 ERROR @@ -1615,23 +1777,49 @@ COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --fail - permission denied -- Check COPY FROM as Superuser/owner. RESET SESSION AUTHORIZATION; SET row_security TO OFF; +COPY copy_t FROM STDIN; --ok +-- 1 abc +-- 2 bcd +-- 3 cde +-- 4 def +-- \. SET row_security TO ON; +COPY copy_t FROM STDIN; --ok +-- 1 abc +-- 2 bcd +-- 3 cde +-- 4 def +-- \. -- Check COPY FROM as user with permissions. SET SESSION AUTHORIZATION regress_rls_bob; SET row_security TO OFF; --- Check COPY FROM as user with permissions and BYPASSRLS +COPY copy_t FROM STDIN; --fail - would be affected by RLS. +-- SET row_security TO ON; +COPY copy_t FROM STDIN; --fail - COPY FROM not supported by RLS. 
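+-- (Editor's note: throughout this corpus the inline COPY ... FROM STDIN data
+-- rows and their closing \. terminator are retained only as comments, keeping
+-- the file parseable as plain SQL outside a psql session.)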
+-- +-- -- Check COPY FROM as user with permissions and BYPASSRLS SET SESSION AUTHORIZATION regress_rls_exempt_user; SET row_security TO ON; +COPY copy_t FROM STDIN; --ok +-- 1 abc +-- 2 bcd +-- 3 cde +-- 4 def +-- \. -- Check COPY FROM as user without permissions. SET SESSION AUTHORIZATION regress_rls_carol; SET row_security TO OFF; -RESET SESSION AUTHORIZATION; -DROP TABLE copy_t; -DROP TABLE copy_rel_to CASCADE; - --- Check WHERE CURRENT OF +COPY copy_t FROM STDIN; --fail - permission denied. +-- SET row_security TO ON; +COPY copy_t FROM STDIN; --fail - permission denied. +-- +-- RESET SESSION AUTHORIZATION; +-- DROP TABLE copy_t; +-- DROP TABLE copy_rel_to CASCADE; +-- +-- -- Check WHERE CURRENT OF SET SESSION AUTHORIZATION regress_rls_alice; CREATE TABLE current_check (currentid int, payload text, rlsuser text); @@ -2143,7 +2331,7 @@ DROP VIEW rls_view; DROP TABLE rls_tbl; DROP TABLE ref_tbl; --- Leaky operator test +-- Leaky operator tests CREATE TABLE rls_tbl (a int); INSERT INTO rls_tbl SELECT x/10 FROM generate_series(1, 100) x; ANALYZE rls_tbl; @@ -2159,9 +2347,58 @@ CREATE OPERATOR <<< (procedure = op_leak, leftarg = int, rightarg = int, restrict = scalarltsel); SELECT * FROM rls_tbl WHERE a <<< 1000; EXPLAIN (COSTS OFF) SELECT * FROM rls_tbl WHERE a <<< 1000 or a <<< 900; +RESET SESSION AUTHORIZATION; + +CREATE TABLE rls_child_tbl () INHERITS (rls_tbl); +INSERT INTO rls_child_tbl SELECT x/10 FROM generate_series(1, 100) x; +ANALYZE rls_child_tbl; + +CREATE TABLE rls_ptbl (a int) PARTITION BY RANGE (a); +CREATE TABLE rls_part PARTITION OF rls_ptbl FOR VALUES FROM (-100) TO (100); +INSERT INTO rls_ptbl SELECT x/10 FROM generate_series(1, 100) x; +ANALYZE rls_ptbl, rls_part; + +ALTER TABLE rls_ptbl ENABLE ROW LEVEL SECURITY; +ALTER TABLE rls_part ENABLE ROW LEVEL SECURITY; +GRANT SELECT ON rls_ptbl TO regress_rls_alice; +GRANT SELECT ON rls_part TO regress_rls_alice; +CREATE POLICY p1 ON rls_tbl USING (a < 0); +CREATE POLICY p2 ON rls_ptbl USING (a < 0); +CREATE POLICY p3 ON rls_part USING (a < 0); + +SET SESSION AUTHORIZATION regress_rls_alice; +SELECT * FROM rls_tbl WHERE a <<< 1000; +SELECT * FROM rls_child_tbl WHERE a <<< 1000; +SELECT * FROM rls_ptbl WHERE a <<< 1000; +SELECT * FROM rls_part WHERE a <<< 1000; +SELECT * FROM (SELECT * FROM rls_tbl UNION ALL + SELECT * FROM rls_tbl) t WHERE a <<< 1000; +SELECT * FROM (SELECT * FROM rls_child_tbl UNION ALL + SELECT * FROM rls_child_tbl) t WHERE a <<< 1000; +RESET SESSION AUTHORIZATION; + +REVOKE SELECT ON rls_tbl FROM regress_rls_alice; +CREATE VIEW rls_tbl_view AS SELECT * FROM rls_tbl; + +ALTER TABLE rls_child_tbl ENABLE ROW LEVEL SECURITY; +GRANT SELECT ON rls_child_tbl TO regress_rls_alice; +CREATE POLICY p4 ON rls_child_tbl USING (a < 0); + +SET SESSION AUTHORIZATION regress_rls_alice; +SELECT * FROM rls_tbl WHERE a <<< 1000; +SELECT * FROM rls_tbl_view WHERE a <<< 1000; +SELECT * FROM rls_child_tbl WHERE a <<< 1000; +SELECT * FROM (SELECT * FROM rls_tbl UNION ALL + SELECT * FROM rls_tbl) t WHERE a <<< 1000; +SELECT * FROM (SELECT * FROM rls_child_tbl UNION ALL + SELECT * FROM rls_child_tbl) t WHERE a <<< 1000; DROP OPERATOR <<< (int, int); DROP FUNCTION op_leak(int, int); RESET SESSION AUTHORIZATION; +DROP TABLE rls_part; +DROP TABLE rls_ptbl; +DROP TABLE rls_child_tbl; +DROP VIEW rls_tbl_view; DROP TABLE rls_tbl; -- Bug #16006: whole-row Vars in a policy don't play nice with sub-selects diff --git a/crates/squawk_parser/tests/data/regression_suite/rules.sql b/crates/squawk_parser/tests/data/regression_suite/rules.sql 
index e7783e41..000f77ab 100644 --- a/crates/squawk_parser/tests/data/regression_suite/rules.sql +++ b/crates/squawk_parser/tests/data/regression_suite/rules.sql @@ -185,6 +185,8 @@ select * from rtest_v1; -- ** Remember the delete rule on rtest_v1: It says -- ** DO INSTEAD DELETE FROM rtest_t1 WHERE a = old.a -- ** So this time both rows with a = 2 must get deleted +-- \p +-- \r delete from rtest_v1 where b = 12; select * from rtest_v1; delete from rtest_v1; @@ -771,6 +773,7 @@ drop table cchild; -- -- temporarily disable fancy output, so view changes create less diff noise +-- \a\t SELECT viewname, definition FROM pg_views WHERE schemaname = 'pg_catalog' @@ -781,6 +784,7 @@ WHERE schemaname = 'pg_catalog' ORDER BY tablename, rulename; -- restore normal output mode +-- \a\t -- -- CREATE OR REPLACE RULE @@ -1029,6 +1033,7 @@ create rule r7 as on delete to rules_src do instead returning trgt.f1, trgt.f2; -- check display of all rules added above +-- \d+ rules_src -- -- Also check multiassignment deparsing. @@ -1038,6 +1043,7 @@ create table rule_dest(f1 int, f2 int[], tag text); create rule rr as on update to rule_t1 do instead UPDATE rule_dest trgt SET (f2[1], f1, tag) = (SELECT new.f2, new.f1, 'updated'::varchar) WHERE trgt.f1 = new.f1 RETURNING new.*; +-- \d+ rule_t1 drop table rule_t1, rule_dest; -- @@ -1073,6 +1079,7 @@ ALTER RULE InsertRule ON rule_v1 RENAME to NewInsertRule; INSERT INTO rule_v1 VALUES(1); SELECT * FROM rule_v1; +-- \d+ rule_v1 -- -- error conditions for alter rename rule @@ -1088,13 +1095,18 @@ DROP TABLE rule_t1; -- check display of VALUES in view definitions -- create view rule_v1 as values(1,2); +-- \d+ rule_v1 alter table rule_v1 rename column column2 to q2; +-- \d+ rule_v1 drop view rule_v1; create view rule_v1(x) as values(1,2); +-- \d+ rule_v1 drop view rule_v1; create view rule_v1(x) as select * from (values(1,2)) v; +-- \d+ rule_v1 drop view rule_v1; create view rule_v1(x) as select * from (values(1,2)) v(q,w); +-- \d+ rule_v1 drop view rule_v1; -- @@ -1205,6 +1217,7 @@ CREATE FUNCTION func_with_set_params() RETURNS integer SET extra_float_digits TO 2 SET work_mem TO '4MB' SET datestyle to iso, mdy + SET temp_tablespaces to NULL SET local_preload_libraries TO "Mixed/Case", 'c:/''a"/path', '', '0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789' IMMUTABLE STRICT; SELECT pg_get_functiondef('func_with_set_params()'::regprocedure); @@ -1328,6 +1341,7 @@ RETURNING merge_action() AS action, *, o.*, n.*; END; +-- \sf merge_sf_test CREATE FUNCTION merge_sf_test2() RETURNS void @@ -1345,6 +1359,7 @@ WHEN NOT MATCHED BY SOURCE THEN DELETE; END; +-- \sf merge_sf_test2 DROP FUNCTION merge_sf_test; DROP FUNCTION merge_sf_test2; diff --git a/crates/squawk_parser/tests/data/regression_suite/select_parallel.sql b/crates/squawk_parser/tests/data/regression_suite/select_parallel.sql index e4b2af78..5ef46994 100644 --- a/crates/squawk_parser/tests/data/regression_suite/select_parallel.sql +++ b/crates/squawk_parser/tests/data/regression_suite/select_parallel.sql @@ -7,7 +7,7 @@ select pg_stat_force_next_flush(); select parallel_workers_to_launch as parallel_workers_to_launch_before, parallel_workers_launched as parallel_workers_launched_before from pg_stat_database - where datname = current_database() ; + where datname = current_database() /* \gset */; create function sp_parallel_restricted(int) returns int as $$begin return $1; end$$ language plpgsql parallel restricted; diff --git 
a/crates/squawk_parser/tests/data/regression_suite/sequence.sql b/crates/squawk_parser/tests/data/regression_suite/sequence.sql index 0c287e83..3df88a92 100644 --- a/crates/squawk_parser/tests/data/regression_suite/sequence.sql +++ b/crates/squawk_parser/tests/data/regression_suite/sequence.sql @@ -245,6 +245,8 @@ WHERE sequencename ~ ANY(ARRAY['sequence_test', 'serialtest']) SELECT * FROM pg_sequence_parameters('sequence_test4'::regclass); +-- \d sequence_test4 +-- \d serialtest2_f2_seq -- Test comments @@ -273,7 +275,9 @@ SELECT lastval(); -- (more tests in src/test/recovery/) CREATE UNLOGGED SEQUENCE sequence_test_unlogged; ALTER SEQUENCE sequence_test_unlogged SET LOGGED; +-- \d sequence_test_unlogged ALTER SEQUENCE sequence_test_unlogged SET UNLOGGED; +-- \d sequence_test_unlogged DROP SEQUENCE sequence_test_unlogged; -- Test sequences in read-only transactions @@ -410,6 +414,6 @@ SELECT nextval('test_seq1'); SELECT nextval('test_seq1'); -- pg_get_sequence_data -SELECT * FROM pg_get_sequence_data('test_seq1'); +SELECT last_value, is_called, page_lsn <= pg_current_wal_lsn() as lsn FROM pg_get_sequence_data('test_seq1'); DROP SEQUENCE test_seq1; diff --git a/crates/squawk_parser/tests/data/regression_suite/sqljson.sql b/crates/squawk_parser/tests/data/regression_suite/sqljson.sql index b8a6b0a7..436e33df 100644 --- a/crates/squawk_parser/tests/data/regression_suite/sqljson.sql +++ b/crates/squawk_parser/tests/data/regression_suite/sqljson.sql @@ -232,6 +232,7 @@ SELECT JSON_ARRAYAGG(NULL NULL ON NULL), JSON_ARRAYAGG(NULL NULL ON NULL RETURNING jsonb) FROM generate_series(1, 5); +-- \x SELECT JSON_ARRAYAGG(bar) as no_options, JSON_ARRAYAGG(bar RETURNING jsonb) as returning_jsonb, @@ -245,6 +246,7 @@ SELECT JSON_ARRAYAGG(foo ORDER BY bar RETURNING jsonb) FILTER (WHERE bar > 2) as row_filtered_agg_returning_jsonb FROM (VALUES (NULL), (3), (1), (NULL), (NULL), (5), (2), (4), (NULL)) foo(bar); +-- \x SELECT bar, JSON_ARRAYAGG(bar) FILTER (WHERE bar > 2) OVER (PARTITION BY foo.bar % 2) @@ -306,6 +308,7 @@ SELECT JSON_OBJECT('foo' : '1' FORMAT JSON, 'bar' : 'baz' RETURNING json); CREATE VIEW json_object_view AS SELECT JSON_OBJECT('foo' : '1' FORMAT JSON, 'bar' : 'baz' RETURNING json); +-- \sv json_object_view DROP VIEW json_object_view; @@ -334,6 +337,7 @@ SELECT JSON_ARRAY('1' FORMAT JSON, 2 RETURNING json); CREATE VIEW json_array_view AS SELECT JSON_ARRAY('1' FORMAT JSON, 2 RETURNING json); +-- \sv json_array_view DROP VIEW json_array_view; @@ -350,6 +354,7 @@ CREATE VIEW json_objectagg_view AS SELECT JSON_OBJECTAGG(i: ('111' || i)::bytea FORMAT JSON WITH UNIQUE RETURNING text) FILTER (WHERE i > 3) FROM generate_series(1,5) i; +-- \sv json_objectagg_view DROP VIEW json_objectagg_view; @@ -366,6 +371,7 @@ CREATE VIEW json_arrayagg_view AS SELECT JSON_ARRAYAGG(('111' || i)::bytea FORMAT JSON NULL ON NULL RETURNING text) FILTER (WHERE i > 3) FROM generate_series(1,5) i; +-- \sv json_arrayagg_view DROP VIEW json_arrayagg_view; @@ -376,6 +382,7 @@ SELECT JSON_ARRAY(SELECT i FROM (VALUES (1), (2), (NULL), (4)) foo(i) RETURNING CREATE VIEW json_array_subquery_view AS SELECT JSON_ARRAY(SELECT i FROM (VALUES (1), (2), (NULL), (4)) foo(i) RETURNING jsonb); +-- \sv json_array_subquery_view DROP VIEW json_array_subquery_view; @@ -471,6 +478,7 @@ SELECT '1' IS JSON AS "any", ('1' || i) IS JSON SCALAR AS "scalar", '[]' IS NOT CREATE VIEW is_json_view AS SELECT '1' IS JSON AS "any", ('1' || i) IS JSON SCALAR AS "scalar", '[]' IS NOT JSON ARRAY AS "array", '{}' IS JSON OBJECT WITH UNIQUE AS "object" FROM 
generate_series(1, 3) i; +-- \sv is_json_view DROP VIEW is_json_view; diff --git a/crates/squawk_parser/tests/data/regression_suite/sqljson_jsontable.sql b/crates/squawk_parser/tests/data/regression_suite/sqljson_jsontable.sql index d461fee5..e4898180 100644 --- a/crates/squawk_parser/tests/data/regression_suite/sqljson_jsontable.sql +++ b/crates/squawk_parser/tests/data/regression_suite/sqljson_jsontable.sql @@ -13,9 +13,9 @@ SELECT * FROM JSON_TABLE('[]', 'strict $.a' COLUMNS (js2 int PATH '$') ERROR ON SELECT * FROM JSON_TABLE(jsonb'"1.23"', '$.a' as js2 COLUMNS (js2 int path '$')); -- Should fail (no columns) --- SELECT * FROM JSON_TABLE(NULL, '$' COLUMNS ()); +SELECT * FROM JSON_TABLE(NULL, '$' COLUMNS ()); --- SELECT * FROM JSON_TABLE (NULL::jsonb, '$' COLUMNS (v1 timestamp)) AS f (v1, v2); +SELECT * FROM JSON_TABLE (NULL::jsonb, '$' COLUMNS (v1 timestamp)) AS f (v1, v2); --duplicated column name SELECT * FROM JSON_TABLE(jsonb'"1.23"', '$.a' COLUMNS (js2 int path '$', js2 int path '$')); @@ -188,6 +188,11 @@ SELECT * FROM ta text[] PATH '$', jba jsonb[] PATH '$')); +-- \sv jsonb_table_view2 +-- \sv jsonb_table_view3 +-- \sv jsonb_table_view4 +-- \sv jsonb_table_view5 +-- \sv jsonb_table_view6 EXPLAIN (COSTS OFF, VERBOSE) SELECT * FROM jsonb_table_view2; EXPLAIN (COSTS OFF, VERBOSE) SELECT * FROM jsonb_table_view3; @@ -445,6 +450,7 @@ SELECT * FROM ) ); +-- \sv jsonb_table_view_nested DROP VIEW jsonb_table_view_nested; CREATE TABLE s (js jsonb); @@ -523,6 +529,7 @@ SELECT sub.* FROM s, NESTED PATH '$.a.za[1]' COLUMNS (NESTED PATH '$.z21[*] ? (@ >= ($"y" +121))' as z21 COLUMNS (b int PATH '$ ? (@ > ($"x" +111))' DEFAULT 0 ON EMPTY)) )) sub; +-- \sv jsonb_table_view7 DROP VIEW jsonb_table_view7; DROP TABLE s; @@ -539,14 +546,18 @@ SELECT * FROM JSON_TABLE(jsonb '1', '$' COLUMNS (a int exists empty object on er -- Test JSON_TABLE() column deparsing -- don't emit default ON ERROR / EMPTY -- behavior CREATE VIEW json_table_view8 AS SELECT * from JSON_TABLE('"a"', '$' COLUMNS (a text PATH '$')); +-- \sv json_table_view8; CREATE VIEW json_table_view9 AS SELECT * from JSON_TABLE('"a"', '$' COLUMNS (a text PATH '$') ERROR ON ERROR); +-- \sv json_table_view9; DROP VIEW json_table_view8, json_table_view9; -- Test JSON_TABLE() deparsing -- don't emit default ON ERROR behavior CREATE VIEW json_table_view8 AS SELECT * from JSON_TABLE('"a"', '$' COLUMNS (a text PATH '$') EMPTY ON ERROR); +-- \sv json_table_view8; CREATE VIEW json_table_view9 AS SELECT * from JSON_TABLE('"a"', '$' COLUMNS (a text PATH '$') EMPTY ARRAY ON ERROR); +-- \sv json_table_view9; DROP VIEW json_table_view8, json_table_view9; diff --git a/crates/squawk_parser/tests/data/regression_suite/sqljson_queryfuncs.sql b/crates/squawk_parser/tests/data/regression_suite/sqljson_queryfuncs.sql index 3a4ee8d8..373eb1cc 100644 --- a/crates/squawk_parser/tests/data/regression_suite/sqljson_queryfuncs.sql +++ b/crates/squawk_parser/tests/data/regression_suite/sqljson_queryfuncs.sql @@ -336,6 +336,7 @@ CREATE TABLE test_jsonb_constraints ( CHECK (JSON_QUERY(js::jsonb, '$.a' RETURNING char(5) OMIT QUOTES EMPTY ARRAY ON EMPTY) > 'a' COLLATE "C") ); +-- \d test_jsonb_constraints SELECT check_clause FROM information_schema.check_constraints @@ -449,6 +450,7 @@ SELECT JSON_VALUE(jsonb '{"a": 123}', '$' || '.' || 'a'); SELECT JSON_VALUE(jsonb '{"a": 123}', '$' || '.' || 'b' DEFAULT 'foo' ON EMPTY); SELECT JSON_QUERY(jsonb '{"a": 123}', '$' || '.' || 'a'); SELECT JSON_QUERY(jsonb '{"a": 123}', '$' || '.' 
|| 'a' WITH WRAPPER); +SELECT JSON_QUERY(jsonb '{"a": 123}', ('$' || '.' || 'a' || NULL)::date WITH WRAPPER); -- Should fail (invalid path) SELECT JSON_QUERY(jsonb '{"a": 123}', 'error' || ' ' || 'error'); @@ -459,6 +461,7 @@ SELECT JSON_QUERY(NULL FORMAT JSON, '$'); -- Test non-const jsonpath CREATE TEMP TABLE jsonpaths (path) AS SELECT '$'; SELECT json_value('"aaa"', path RETURNING json) FROM jsonpaths; +SELECT json_value('"aaa"', jsonpaths RETURNING json) FROM jsonpaths; -- Test PASSING argument parsing SELECT JSON_QUERY(jsonb 'null', '$xyz' PASSING 1 AS xy); diff --git a/crates/squawk_parser/tests/data/regression_suite/stats.sql b/crates/squawk_parser/tests/data/regression_suite/stats.sql index 6eca1a39..3a6395ab 100644 --- a/crates/squawk_parser/tests/data/regression_suite/stats.sql +++ b/crates/squawk_parser/tests/data/regression_suite/stats.sql @@ -9,8 +9,10 @@ SHOW track_counts; -- must be on -- List of backend types, contexts and objects tracked in pg_stat_io. +-- \a SELECT backend_type, object, context FROM pg_stat_io ORDER BY backend_type COLLATE "C", object COLLATE "C", context COLLATE "C"; +-- \a -- ensure that both seqscan and indexscan plans are allowed SET enable_seqscan TO on; @@ -21,7 +23,7 @@ SET enable_indexonlyscan TO off; SET track_functions TO 'all'; -- record dboid for later use -SELECT oid AS dboid from pg_database where datname = current_database() ; +SELECT oid AS dboid from pg_database where datname = current_database() /* \gset */; -- save counters BEGIN; @@ -134,9 +136,9 @@ COMMIT; -- Basic tests for track_functions --- CREATE FUNCTION stats_test_func1() RETURNS VOID LANGUAGE plpgsql AS $$BEGIN END;$$; -SELECT 'stats_test_func1()'::regprocedure::oid AS stats_test_func1_oid ; +SELECT 'stats_test_func1()'::regprocedure::oid AS stats_test_func1_oid /* \gset */; CREATE FUNCTION stats_test_func2() RETURNS VOID LANGUAGE plpgsql AS $$BEGIN END;$$; -SELECT 'stats_test_func2()'::regprocedure::oid AS stats_test_func2_oid ; +SELECT 'stats_test_func2()'::regprocedure::oid AS stats_test_func2_oid /* \gset */; -- test that stats are accumulated BEGIN; @@ -210,15 +212,15 @@ SELECT pg_stat_get_function_calls('stats_test_func2_oid'); -- by oid after the DROP TABLE. Save oids. 
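+-- (Editor's note: upstream these SELECTs end with psql's \gset metacommand,
+-- which captures the result into psql variables; in this corpus the
+-- metacommand survives only as a trailing /* \gset */ comment, and later
+-- references to the captured values appear as quoted literals such as
+-- 'drop_stats_test_oid'.)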
CREATE TABLE drop_stats_test(); INSERT INTO drop_stats_test DEFAULT VALUES; -SELECT 'drop_stats_test'::regclass::oid AS drop_stats_test_oid ; +SELECT 'drop_stats_test'::regclass::oid AS drop_stats_test_oid /* \gset */; CREATE TABLE drop_stats_test_xact(); INSERT INTO drop_stats_test_xact DEFAULT VALUES; -SELECT 'drop_stats_test_xact'::regclass::oid AS drop_stats_test_xact_oid ; +SELECT 'drop_stats_test_xact'::regclass::oid AS drop_stats_test_xact_oid /* \gset */; CREATE TABLE drop_stats_test_subxact(); INSERT INTO drop_stats_test_subxact DEFAULT VALUES; -SELECT 'drop_stats_test_subxact'::regclass::oid AS drop_stats_test_subxact_oid ; +SELECT 'drop_stats_test_subxact'::regclass::oid AS drop_stats_test_subxact_oid /* \gset */; SELECT pg_stat_force_next_flush(); @@ -310,8 +312,11 @@ SELECT pg_stat_force_next_flush(); SELECT last_seq_scan, last_idx_scan FROM pg_stat_all_tables WHERE relid = 'test_last_scan'::regclass; COMMIT; +SELECT stats_reset IS NOT NULL AS has_stats_reset + FROM pg_stat_all_tables WHERE relid = 'test_last_scan'::regclass; SELECT pg_stat_reset_single_table_counters('test_last_scan'::regclass); -SELECT seq_scan, idx_scan FROM pg_stat_all_tables WHERE relid = 'test_last_scan'::regclass; +SELECT seq_scan, idx_scan, stats_reset IS NOT NULL AS has_stats_reset + FROM pg_stat_all_tables WHERE relid = 'test_last_scan'::regclass; -- ensure we start out with exactly one index and sequential scan BEGIN; @@ -328,7 +333,7 @@ COMMIT; -- fetch timestamps from before the next test SELECT last_seq_scan AS test_last_seq, last_idx_scan AS test_last_idx -FROM pg_stat_all_tables WHERE relid = 'test_last_scan'::regclass ; +FROM pg_stat_all_tables WHERE relid = 'test_last_scan'::regclass /* \gset */; SELECT pg_sleep(0.1); -- assume a minimum timestamp granularity of 100ms -- cause one sequential scan @@ -346,7 +351,7 @@ FROM pg_stat_all_tables WHERE relid = 'test_last_scan'::regclass; -- fetch timestamps from before the next test SELECT last_seq_scan AS test_last_seq, last_idx_scan AS test_last_idx -FROM pg_stat_all_tables WHERE relid = 'test_last_scan'::regclass ; +FROM pg_stat_all_tables WHERE relid = 'test_last_scan'::regclass /* \gset */; SELECT pg_sleep(0.1); -- cause one index scan @@ -364,7 +369,7 @@ FROM pg_stat_all_tables WHERE relid = 'test_last_scan'::regclass; -- fetch timestamps from before the next test SELECT last_seq_scan AS test_last_seq, last_idx_scan AS test_last_idx -FROM pg_stat_all_tables WHERE relid = 'test_last_scan'::regclass ; +FROM pg_stat_all_tables WHERE relid = 'test_last_scan'::regclass /* \gset */; SELECT pg_sleep(0.1); -- cause one bitmap index scan @@ -380,6 +385,17 @@ COMMIT; SELECT seq_scan, 'test_last_seq' = last_seq_scan AS seq_ok, idx_scan, 'test_last_idx' < last_idx_scan AS idx_ok FROM pg_stat_all_tables WHERE relid = 'test_last_scan'::regclass; +-- check the stats in pg_stat_all_indexes +SELECT idx_scan, 'test_last_idx' < last_idx_scan AS idx_ok, + stats_reset IS NOT NULL AS has_stats_reset + FROM pg_stat_all_indexes WHERE indexrelid = 'test_last_scan_pkey'::regclass; + +-- check that the stats in pg_stat_all_indexes are reset +SELECT pg_stat_reset_single_table_counters('test_last_scan_pkey'::regclass); + +SELECT idx_scan, stats_reset IS NOT NULL AS has_stats_reset + FROM pg_stat_all_indexes WHERE indexrelid = 'test_last_scan_pkey'::regclass; + ----- -- Test reset of some stats for shared table ----- @@ -388,11 +404,11 @@ FROM pg_stat_all_tables WHERE relid = 'test_last_scan'::regclass; -- pg_shdescription with a fake value, then sets it back to its -- 
original value. SELECT shobj_description(d.oid, 'pg_database') as description_before - FROM pg_database d WHERE datname = current_database() ; + FROM pg_database d WHERE datname = current_database() /* \gset */; -- force some stats in pg_shdescription. BEGIN; -SELECT current_database() as datname ; +SELECT current_database() as datname /* \gset */; COMMENT ON DATABASE "datname" IS 'This is a test comment'; SELECT pg_stat_force_next_flush(); COMMIT; @@ -405,26 +421,30 @@ SELECT (n_tup_ins + n_tup_upd) > 0 AS has_data FROM pg_stat_all_tables WHERE relid = 'pg_shdescription'::regclass; -- set back comment +-- \if :{?description_before} COMMENT ON DATABASE "datname" IS 'description_before'; +-- \else COMMENT ON DATABASE "datname" IS NULL; +-- \endif ----- -- Test that various stats views are being properly populated ----- -- Test that sessions is incremented when a new session is started in pg_stat_database -SELECT sessions AS db_stat_sessions FROM pg_stat_database WHERE datname = (SELECT current_database()) ; +SELECT sessions AS db_stat_sessions FROM pg_stat_database WHERE datname = (SELECT current_database()) /* \gset */; +-- \c SELECT pg_stat_force_next_flush(); SELECT sessions > 'db_stat_sessions' FROM pg_stat_database WHERE datname = (SELECT current_database()); -- Test pg_stat_checkpointer checkpointer-related stats, together with pg_stat_wal -SELECT num_requested AS rqst_ckpts_before FROM pg_stat_checkpointer ; +SELECT num_requested AS rqst_ckpts_before FROM pg_stat_checkpointer /* \gset */; -- Test pg_stat_wal -SELECT wal_bytes AS wal_bytes_before FROM pg_stat_wal ; +SELECT wal_bytes AS wal_bytes_before FROM pg_stat_wal /* \gset */; -- Test pg_stat_get_backend_wal() -SELECT wal_bytes AS backend_wal_bytes_before from pg_stat_get_backend_wal(pg_backend_pid()) ; +SELECT wal_bytes AS backend_wal_bytes_before from pg_stat_get_backend_wal(pg_backend_pid()) /* \gset */; -- Make a temp table so our temp schema exists CREATE TEMP TABLE test_stats_temp AS SELECT 17; @@ -433,8 +453,13 @@ DROP TABLE test_stats_temp; -- Checkpoint twice: The checkpointer reports stats after reporting completion -- of the checkpoint. But after a second checkpoint we'll see at least the -- results of the first. -CHECKPOINT; -CHECKPOINT; +-- +-- While at it, test checkpoint options. Note that we don't test MODE SPREAD +-- because it would prolong the test. +-- CHECKPOINT (WRONG); +-- CHECKPOINT (MODE WRONG); +-- CHECKPOINT (MODE FAST, FLUSH_UNLOGGED FALSE); +-- CHECKPOINT (FLUSH_UNLOGGED); SELECT num_requested > 'rqst_ckpts_before' FROM pg_stat_checkpointer; SELECT wal_bytes > 'wal_bytes_before' FROM pg_stat_wal; @@ -454,11 +479,11 @@ WHERE pg_stat_get_backend_pid(beid) = pg_backend_pid(); ----- -- Test that reset_slru with a specified SLRU works. 
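+-- (Editor's note: pg_stat_reset_slru('name') resets the counters of a single
+-- SLRU cache, while pg_stat_reset_slru() with no argument resets all of them;
+-- the stats_reset timestamps saved below verify both behaviors.)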
-SELECT stats_reset AS slru_commit_ts_reset_ts FROM pg_stat_slru WHERE name = 'commit_timestamp' ; -SELECT stats_reset AS slru_notify_reset_ts FROM pg_stat_slru WHERE name = 'notify' ; +SELECT stats_reset AS slru_commit_ts_reset_ts FROM pg_stat_slru WHERE name = 'commit_timestamp' /* \gset */; +SELECT stats_reset AS slru_notify_reset_ts FROM pg_stat_slru WHERE name = 'notify' /* \gset */; SELECT pg_stat_reset_slru('commit_timestamp'); SELECT stats_reset > 'slru_commit_ts_reset_ts'::timestamptz FROM pg_stat_slru WHERE name = 'commit_timestamp'; -SELECT stats_reset AS slru_commit_ts_reset_ts FROM pg_stat_slru WHERE name = 'commit_timestamp' ; +SELECT stats_reset AS slru_commit_ts_reset_ts FROM pg_stat_slru WHERE name = 'commit_timestamp' /* \gset */; -- Test that multiple SLRUs are reset when no specific SLRU provided to reset function SELECT pg_stat_reset_slru(); @@ -466,32 +491,32 @@ SELECT stats_reset > 'slru_commit_ts_reset_ts'::timestamptz FROM pg_stat_slru WH SELECT stats_reset > 'slru_notify_reset_ts'::timestamptz FROM pg_stat_slru WHERE name = 'notify'; -- Test that reset_shared with archiver specified as the stats type works -SELECT stats_reset AS archiver_reset_ts FROM pg_stat_archiver ; +SELECT stats_reset AS archiver_reset_ts FROM pg_stat_archiver /* \gset */; SELECT pg_stat_reset_shared('archiver'); SELECT stats_reset > 'archiver_reset_ts'::timestamptz FROM pg_stat_archiver; -- Test that reset_shared with bgwriter specified as the stats type works -SELECT stats_reset AS bgwriter_reset_ts FROM pg_stat_bgwriter ; +SELECT stats_reset AS bgwriter_reset_ts FROM pg_stat_bgwriter /* \gset */; SELECT pg_stat_reset_shared('bgwriter'); SELECT stats_reset > 'bgwriter_reset_ts'::timestamptz FROM pg_stat_bgwriter; -- Test that reset_shared with checkpointer specified as the stats type works -SELECT stats_reset AS checkpointer_reset_ts FROM pg_stat_checkpointer ; +SELECT stats_reset AS checkpointer_reset_ts FROM pg_stat_checkpointer /* \gset */; SELECT pg_stat_reset_shared('checkpointer'); SELECT stats_reset > 'checkpointer_reset_ts'::timestamptz FROM pg_stat_checkpointer; -- Test that reset_shared with recovery_prefetch specified as the stats type works -SELECT stats_reset AS recovery_prefetch_reset_ts FROM pg_stat_recovery_prefetch ; +SELECT stats_reset AS recovery_prefetch_reset_ts FROM pg_stat_recovery_prefetch /* \gset */; SELECT pg_stat_reset_shared('recovery_prefetch'); SELECT stats_reset > 'recovery_prefetch_reset_ts'::timestamptz FROM pg_stat_recovery_prefetch; -- Test that reset_shared with slru specified as the stats type works -SELECT max(stats_reset) AS slru_reset_ts FROM pg_stat_slru ; +SELECT max(stats_reset) AS slru_reset_ts FROM pg_stat_slru /* \gset */; SELECT pg_stat_reset_shared('slru'); SELECT max(stats_reset) > 'slru_reset_ts'::timestamptz FROM pg_stat_slru; -- Test that reset_shared with wal specified as the stats type works -SELECT stats_reset AS wal_reset_ts FROM pg_stat_wal ; +SELECT stats_reset AS wal_reset_ts FROM pg_stat_wal /* \gset */; SELECT pg_stat_reset_shared('wal'); SELECT stats_reset > 'wal_reset_ts'::timestamptz FROM pg_stat_wal; @@ -502,7 +527,7 @@ SELECT pg_stat_reset_shared('unknown'); -- Since pg_stat_database stats_reset starts out as NULL, reset it once first so we have something to compare it to SELECT pg_stat_reset(); -SELECT stats_reset AS db_reset_ts FROM pg_stat_database WHERE datname = (SELECT current_database()) ; +SELECT stats_reset AS db_reset_ts FROM pg_stat_database WHERE datname = (SELECT current_database()) /* \gset */; SELECT 
pg_stat_reset(); SELECT stats_reset > 'db_reset_ts'::timestamptz FROM pg_stat_database WHERE datname = (SELECT current_database()); @@ -556,7 +581,7 @@ SELECT pg_stat_have_stats('database', 'dboid', 0); -- pg_stat_have_stats returns true for committed index creation CREATE table stats_test_tab1 as select generate_series(1,10) a; CREATE index stats_test_idx1 on stats_test_tab1(a); -SELECT 'stats_test_idx1'::regclass::oid AS stats_test_idx1_oid ; +SELECT 'stats_test_idx1'::regclass::oid AS stats_test_idx1_oid /* \gset */; SET enable_seqscan TO off; select a from stats_test_tab1 where a = 3; SELECT pg_stat_have_stats('relation', 'dboid', 'stats_test_idx1_oid'); @@ -569,7 +594,7 @@ SELECT pg_stat_have_stats('relation', 'dboid', 'stats_test_idx1_oid'); -- pg_stat_have_stats returns false for rolled back index creation BEGIN; CREATE index stats_test_idx1 on stats_test_tab1(a); -SELECT 'stats_test_idx1'::regclass::oid AS stats_test_idx1_oid ; +SELECT 'stats_test_idx1'::regclass::oid AS stats_test_idx1_oid /* \gset */; select a from stats_test_tab1 where a = 3; SELECT pg_stat_have_stats('relation', 'dboid', 'stats_test_idx1_oid'); ROLLBACK; @@ -577,14 +602,14 @@ SELECT pg_stat_have_stats('relation', 'dboid', 'stats_test_idx1_oid'); -- pg_stat_have_stats returns true for reindex CONCURRENTLY CREATE index stats_test_idx1 on stats_test_tab1(a); -SELECT 'stats_test_idx1'::regclass::oid AS stats_test_idx1_oid ; +SELECT 'stats_test_idx1'::regclass::oid AS stats_test_idx1_oid /* \gset */; select a from stats_test_tab1 where a = 3; SELECT pg_stat_have_stats('relation', 'dboid', 'stats_test_idx1_oid'); REINDEX index CONCURRENTLY stats_test_idx1; -- false for previous oid SELECT pg_stat_have_stats('relation', 'dboid', 'stats_test_idx1_oid'); -- true for new oid -SELECT 'stats_test_idx1'::regclass::oid AS stats_test_idx1_oid ; +SELECT 'stats_test_idx1'::regclass::oid AS stats_test_idx1_oid /* \gset */; SELECT pg_stat_have_stats('relation', 'dboid', 'stats_test_idx1_oid'); -- pg_stat_have_stats returns true for a rolled back drop index with stats @@ -617,30 +642,30 @@ SELECT pg_stat_get_subscription_stats(NULL); -- Create a regular table and insert some data to generate IOCONTEXT_NORMAL -- extends. 
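-- Orientation for the pg_stat_io checks below (descriptive aside, not part of
-- the upstream test): the view keys its counters by backend_type, object and
-- context, which is why each sum pins object and context, e.g.
--   SELECT backend_type, object, context, reads, writes, extends
--     FROM pg_stat_io WHERE context = 'normal' AND object = 'relation';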
SELECT pid AS checkpointer_pid FROM pg_stat_activity - WHERE backend_type = 'checkpointer' ; + WHERE backend_type = 'checkpointer' /* \gset */; SELECT sum(extends) AS io_sum_shared_before_extends - FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' ; + FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' /* \gset */; SELECT sum(extends) AS my_io_sum_shared_before_extends FROM pg_stat_get_backend_io(pg_backend_pid()) - WHERE context = 'normal' AND object = 'relation' ; + WHERE context = 'normal' AND object = 'relation' /* \gset */; SELECT sum(writes) AS writes, sum(fsyncs) AS fsyncs FROM pg_stat_io - WHERE object = 'relation' ; -- io_sum_shared_before_ + WHERE object = 'relation' /* \gset io_sum_shared_before_ */; SELECT sum(writes) AS writes, sum(fsyncs) AS fsyncs FROM pg_stat_get_backend_io(pg_backend_pid()) - WHERE object = 'relation' ; -- my_io_sum_shared_before_ + WHERE object = 'relation' /* \gset my_io_sum_shared_before_ */; SELECT sum(writes) AS writes, sum(fsyncs) AS fsyncs FROM pg_stat_io - WHERE context = 'normal' AND object = 'wal' ; -- io_sum_wal_normal_before_ + WHERE context = 'normal' AND object = 'wal' /* \gset io_sum_wal_normal_before_ */; CREATE TABLE test_io_shared(a int); INSERT INTO test_io_shared SELECT i FROM generate_series(1,100)i; SELECT pg_stat_force_next_flush(); SELECT sum(extends) AS io_sum_shared_after_extends - FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' ; + FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' /* \gset */; SELECT 'io_sum_shared_after_extends' > 'io_sum_shared_before_extends'; SELECT sum(extends) AS my_io_sum_shared_after_extends FROM pg_stat_get_backend_io(pg_backend_pid()) - WHERE context = 'normal' AND object = 'relation' ; + WHERE context = 'normal' AND object = 'relation' /* \gset */; SELECT 'my_io_sum_shared_after_extends' > 'my_io_sum_shared_before_extends'; -- After a checkpoint, there should be some additional IOCONTEXT_NORMAL writes @@ -650,19 +675,19 @@ CHECKPOINT; CHECKPOINT; SELECT sum(writes) AS writes, sum(fsyncs) AS fsyncs FROM pg_stat_io - WHERE object = 'relation' ; -- io_sum_shared_after_ + WHERE object = 'relation' /* \gset io_sum_shared_after_ */; SELECT 'io_sum_shared_after_writes' > 'io_sum_shared_before_writes'; SELECT current_setting('fsync') = 'off' OR 'io_sum_shared_after_fsyncs' > 'io_sum_shared_before_fsyncs'; SELECT sum(writes) AS writes, sum(fsyncs) AS fsyncs FROM pg_stat_get_backend_io(pg_backend_pid()) - WHERE object = 'relation' ; -- my_io_sum_shared_after_ + WHERE object = 'relation' /* \gset my_io_sum_shared_after_ */; SELECT 'my_io_sum_shared_after_writes' >= 'my_io_sum_shared_before_writes'; SELECT current_setting('fsync') = 'off' OR 'my_io_sum_shared_after_fsyncs' >= 'my_io_sum_shared_before_fsyncs'; SELECT sum(writes) AS writes, sum(fsyncs) AS fsyncs FROM pg_stat_io - WHERE context = 'normal' AND object = 'wal' ; -- io_sum_wal_normal_after_ + WHERE context = 'normal' AND object = 'wal' /* \gset io_sum_wal_normal_after_ */; SELECT current_setting('synchronous_commit') = 'on'; SELECT 'io_sum_wal_normal_after_writes' > 'io_sum_wal_normal_before_writes'; SELECT current_setting('fsync') = 'off' @@ -672,7 +697,7 @@ SELECT current_setting('fsync') = 'off' -- Change the tablespace so that the table is rewritten directly, then SELECT -- from it to cause it to be read back into shared buffers.
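-- Descriptive aside: ALTER TABLE ... SET TABLESPACE copies the relation
-- block by block at the storage layer, bypassing shared buffers, so the
-- SELECT that follows has to perform real reads rather than buffer hits.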
SELECT sum(reads) AS io_sum_shared_before_reads - FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' ; + FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' /* \gset */; -- Do this in a transaction to prevent spurious failures due to concurrent accesses to our newly -- rewritten table, e.g. by autovacuum. BEGIN; @@ -683,11 +708,11 @@ SELECT COUNT(*) FROM test_io_shared; COMMIT; SELECT pg_stat_force_next_flush(); SELECT sum(reads) AS io_sum_shared_after_reads - FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' ; + FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' /* \gset */; SELECT 'io_sum_shared_after_reads' > 'io_sum_shared_before_reads'; SELECT sum(hits) AS io_sum_shared_before_hits - FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' ; + FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' /* \gset */; -- Select from the table again to count hits. -- Ensure we generate hits by forcing a nested loop self-join with no -- materialize node. The outer side's buffer will stay pinned, preventing its @@ -701,7 +726,7 @@ SELECT COUNT(*) FROM test_io_shared t1 INNER JOIN test_io_shared t2 USING (a); COMMIT; SELECT pg_stat_force_next_flush(); SELECT sum(hits) AS io_sum_shared_after_hits - FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' ; + FROM pg_stat_io WHERE context = 'normal' AND object = 'relation' /* \gset */; SELECT 'io_sum_shared_after_hits' > 'io_sum_shared_before_hits'; DROP TABLE test_io_shared; @@ -715,11 +740,12 @@ DROP TABLE test_io_shared; -- Set temp_buffers to its minimum so that we can trigger writes with fewer -- inserted tuples. Do so in a new session in case temporary tables have been -- accessed by previous tests in this session. +-- \c SET temp_buffers TO 100; CREATE TEMPORARY TABLE test_io_local(a int, b TEXT); SELECT sum(extends) AS extends, sum(evictions) AS evictions, sum(writes) AS writes FROM pg_stat_io - WHERE context = 'normal' AND object = 'temp relation' ; -- io_sum_local_before_ + WHERE context = 'normal' AND object = 'temp relation' /* \gset io_sum_local_before_ */; -- Insert tuples into the temporary table, generating extends in the stats. -- Insert enough values that we need to reuse and write out dirty local -- buffers, generating evictions and writes. @@ -728,7 +754,7 @@ INSERT INTO test_io_local SELECT generate_series(1, 5000) as id, repeat('a', 200 SELECT pg_relation_size('test_io_local') / current_setting('block_size')::int8 > 100; SELECT sum(reads) AS io_sum_local_before_reads - FROM pg_stat_io WHERE context = 'normal' AND object = 'temp relation' ; + FROM pg_stat_io WHERE context = 'normal' AND object = 'temp relation' /* \gset */; -- Read in evicted buffers, generating reads. 
SELECT COUNT(*) FROM test_io_local; SELECT pg_stat_force_next_flush(); @@ -737,7 +763,7 @@ SELECT sum(evictions) AS evictions, sum(writes) AS writes, sum(extends) AS extends FROM pg_stat_io - WHERE context = 'normal' AND object = 'temp relation' ; -- io_sum_local_after_ + WHERE context = 'normal' AND object = 'temp relation' /* \gset io_sum_local_after_ */; SELECT 'io_sum_local_after_evictions' > 'io_sum_local_before_evictions', 'io_sum_local_after_reads' > 'io_sum_local_before_reads', 'io_sum_local_after_writes' > 'io_sum_local_before_writes', @@ -749,7 +775,7 @@ SELECT 'io_sum_local_after_evictions' > 'io_sum_local_before_evictions', ALTER TABLE test_io_local SET TABLESPACE regress_tblspace; SELECT pg_stat_force_next_flush(); SELECT sum(writes) AS io_sum_local_new_tblspc_writes - FROM pg_stat_io WHERE context = 'normal' AND object = 'temp relation' ; + FROM pg_stat_io WHERE context = 'normal' AND object = 'temp relation' /* \gset */; SELECT 'io_sum_local_new_tblspc_writes' > 'io_sum_local_after_writes'; RESET temp_buffers; @@ -768,7 +794,7 @@ RESET temp_buffers; -- reads. SET wal_skip_threshold = '1 kB'; SELECT sum(reuses) AS reuses, sum(reads) AS reads, sum(evictions) AS evictions - FROM pg_stat_io WHERE context = 'vacuum' ; -- io_sum_vac_strategy_before_ + FROM pg_stat_io WHERE context = 'vacuum' /* \gset io_sum_vac_strategy_before_ */; CREATE TABLE test_io_vac_strategy(a int, b int) WITH (autovacuum_enabled = 'false'); INSERT INTO test_io_vac_strategy SELECT i, i from generate_series(1, 4500)i; -- Ensure that the next VACUUM will need to perform IO by rewriting the table @@ -779,7 +805,7 @@ VACUUM (FULL) test_io_vac_strategy; VACUUM (PARALLEL 0, BUFFER_USAGE_LIMIT 128) test_io_vac_strategy; SELECT pg_stat_force_next_flush(); SELECT sum(reuses) AS reuses, sum(reads) AS reads, sum(evictions) AS evictions - FROM pg_stat_io WHERE context = 'vacuum' ; -- io_sum_vac_strategy_after_ + FROM pg_stat_io WHERE context = 'vacuum' /* \gset io_sum_vac_strategy_after_ */; SELECT 'io_sum_vac_strategy_after_reads' > 'io_sum_vac_strategy_before_reads'; SELECT ('io_sum_vac_strategy_after_reuses' + 'io_sum_vac_strategy_after_evictions') > ('io_sum_vac_strategy_before_reuses' + 'io_sum_vac_strategy_before_evictions'); @@ -788,31 +814,31 @@ RESET wal_skip_threshold; -- Test that extends done by a CTAS, which uses a BAS_BULKWRITE -- BufferAccessStrategy, are tracked in pg_stat_io. 
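-- Descriptive aside: CTAS is not the only bulkwrite user; COPY FROM writes
-- through the same bulkwrite buffer-access strategy, so a (hypothetical)
--   COPY test_io_bulkwrite_strategy FROM stdin;
-- would move these counters as well.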
SELECT sum(extends) AS io_sum_bulkwrite_strategy_extends_before - FROM pg_stat_io WHERE context = 'bulkwrite' ; + FROM pg_stat_io WHERE context = 'bulkwrite' /* \gset */; CREATE TABLE test_io_bulkwrite_strategy AS SELECT i FROM generate_series(1,100)i; SELECT pg_stat_force_next_flush(); SELECT sum(extends) AS io_sum_bulkwrite_strategy_extends_after - FROM pg_stat_io WHERE context = 'bulkwrite' ; + FROM pg_stat_io WHERE context = 'bulkwrite' /* \gset */; SELECT 'io_sum_bulkwrite_strategy_extends_after' > 'io_sum_bulkwrite_strategy_extends_before'; -- Test IO stats reset SELECT pg_stat_have_stats('io', 0, 0); SELECT sum(evictions) + sum(reuses) + sum(extends) + sum(fsyncs) + sum(reads) + sum(writes) + sum(writebacks) + sum(hits) AS io_stats_pre_reset - FROM pg_stat_io ; + FROM pg_stat_io /* \gset */; SELECT sum(evictions) + sum(reuses) + sum(extends) + sum(fsyncs) + sum(reads) + sum(writes) + sum(writebacks) + sum(hits) AS my_io_stats_pre_reset - FROM pg_stat_get_backend_io(pg_backend_pid()) ; + FROM pg_stat_get_backend_io(pg_backend_pid()) /* \gset */; SELECT pg_stat_reset_shared('io'); SELECT sum(evictions) + sum(reuses) + sum(extends) + sum(fsyncs) + sum(reads) + sum(writes) + sum(writebacks) + sum(hits) AS io_stats_post_reset - FROM pg_stat_io ; + FROM pg_stat_io /* \gset */; SELECT 'io_stats_post_reset' < 'io_stats_pre_reset'; SELECT sum(evictions) + sum(reuses) + sum(extends) + sum(fsyncs) + sum(reads) + sum(writes) + sum(writebacks) + sum(hits) AS my_io_stats_post_reset - FROM pg_stat_get_backend_io(pg_backend_pid()) ; + FROM pg_stat_get_backend_io(pg_backend_pid()) /* \gset */; -- pg_stat_reset_shared() did not reset backend IO stats SELECT 'my_io_stats_pre_reset' <= 'my_io_stats_post_reset'; -- but pg_stat_reset_backend_stats() does SELECT pg_stat_reset_backend_stats(pg_backend_pid()); SELECT sum(evictions) + sum(reuses) + sum(extends) + sum(fsyncs) + sum(reads) + sum(writes) + sum(writebacks) + sum(hits) AS my_io_stats_post_backend_reset - FROM pg_stat_get_backend_io(pg_backend_pid()) ; + FROM pg_stat_get_backend_io(pg_backend_pid()) /* \gset */; SELECT 'my_io_stats_pre_reset' > 'my_io_stats_post_backend_reset'; -- Check invalid input for pg_stat_get_backend_io() @@ -859,6 +885,7 @@ UPDATE brin_hot SET val = -3 WHERE id = 42; -- in pgstat_report_stat(). But instead of waiting for the rate limiter's -- timeout to elapse, let's just start a new session. The old one will -- then send its stats before dying. 
+-- \c - SELECT wait_for_hot_stats(); SELECT pg_stat_get_tuples_hot_updated('brin_hot'::regclass::oid); diff --git a/crates/squawk_parser/tests/data/regression_suite/stats_ext.sql b/crates/squawk_parser/tests/data/regression_suite/stats_ext.sql index 05798f8f..f42c6bec 100644 --- a/crates/squawk_parser/tests/data/regression_suite/stats_ext.sql +++ b/crates/squawk_parser/tests/data/regression_suite/stats_ext.sql @@ -40,6 +40,18 @@ CREATE STATISTICS tst ON x, x, y, x, x, (x || 'x'), (y + 1), (x || 'x'), (x || ' CREATE STATISTICS tst ON (x || 'x'), (x || 'x'), (y + 1), (x || 'x'), (x || 'x'), (y + 1), (x || 'x'), (x || 'x'), (y + 1) FROM ext_stats_test; CREATE STATISTICS tst ON (x || 'x'), (x || 'x'), y FROM ext_stats_test; CREATE STATISTICS tst (unrecognized) ON x, y FROM ext_stats_test; +-- unsupported targets +-- CREATE STATISTICS tst ON a FROM (VALUES (x)) AS foo; +-- CREATE STATISTICS tst ON a FROM foo NATURAL JOIN bar; +-- CREATE STATISTICS tst ON a FROM (SELECT * FROM ext_stats_test) AS foo; +-- CREATE STATISTICS tst ON a FROM ext_stats_test s TABLESAMPLE system (x); +-- CREATE STATISTICS tst ON a FROM XMLTABLE('foo' PASSING 'bar' COLUMNS a text); +-- CREATE STATISTICS tst ON a FROM JSON_TABLE(jsonb '123', '$' COLUMNS (item int)); +CREATE FUNCTION tftest(int) returns table(a int, b int) as $$ +SELECT $1, $1+i FROM generate_series(1,5) g(i); +$$ LANGUAGE sql IMMUTABLE STRICT; +-- CREATE STATISTICS alt_stat2 ON a FROM tftest(1); +DROP FUNCTION tftest; -- incorrect expressions CREATE STATISTICS tst ON (y) FROM ext_stats_test; -- single column reference CREATE STATISTICS tst ON y + z FROM ext_stats_test; -- missing parentheses @@ -71,6 +83,14 @@ DROP STATISTICS ab1_a_b_stats; ALTER STATISTICS ab1_a_b_stats RENAME TO ab1_a_b_stats_new; RESET SESSION AUTHORIZATION; DROP ROLE regress_stats_ext; +CREATE STATISTICS pg_temp.stats_ext_temp ON a, b FROM ab1; +SELECT regexp_replace(pg_describe_object(tableoid, oid, 0), + 'pg_temp_[0-9]*', 'pg_temp_REDACTED') AS descr, + pg_statistics_obj_is_visible(oid) AS visible + FROM pg_statistic_ext + WHERE stxname = 'stats_ext_temp'; +DROP STATISTICS stats_ext_temp; -- shall fail +DROP STATISTICS pg_temp.stats_ext_temp; CREATE STATISTICS IF NOT EXISTS ab1_a_b_stats ON a, b FROM ab1; DROP STATISTICS ab1_a_b_stats; @@ -88,6 +108,7 @@ CREATE STATISTICS ab1_b_c_stats ON b, c FROM ab1; CREATE STATISTICS ab1_a_b_c_stats ON a, b, c FROM ab1; CREATE STATISTICS ab1_b_a_stats ON b, a FROM ab1; ALTER TABLE ab1 DROP COLUMN a; +-- \d ab1 -- Ensure statistics are dropped when table is SELECT stxname FROM pg_statistic_ext WHERE stxname LIKE 'ab1%'; DROP TABLE ab1; @@ -102,11 +123,16 @@ ANALYZE ab1; ALTER TABLE ab1 ALTER a SET STATISTICS -1; -- setting statistics target 0 skips the statistics, without printing any message, so check catalog ALTER STATISTICS ab1_a_b_stats SET STATISTICS 0; +-- \d ab1 ANALYZE ab1; -SELECT stxname, stxdndistinct, stxddependencies, stxdmcv, stxdinherit +SELECT stxname, + replace(d.stxdndistinct, '}, ', E'},\n') AS stxdndistinct, + replace(d.stxddependencies, '}, ', E'},\n') AS stxddependencies, + stxdmcv, stxdinherit FROM pg_statistic_ext s LEFT JOIN pg_statistic_ext_data d ON (d.stxoid = s.oid) WHERE s.stxname = 'ab1_a_b_stats'; ALTER STATISTICS ab1_a_b_stats SET STATISTICS -1; +-- \d+ ab1 -- partial analyze doesn't build stats either ANALYZE ab1 (a); ANALYZE ab1; @@ -274,7 +300,7 @@ CREATE STATISTICS s10 ON a, b, c FROM ndistinct; ANALYZE ndistinct; -SELECT s.stxkind, d.stxdndistinct +SELECT s.stxkind, replace(d.stxdndistinct, '}, ', E'},\n') 
AS stxdndistinct FROM pg_statistic_ext s, pg_statistic_ext_data d WHERE s.stxrelid = 'ndistinct'::regclass AND d.stxoid = s.oid; @@ -315,7 +341,7 @@ INSERT INTO ndistinct (a, b, c, filler1) ANALYZE ndistinct; -SELECT s.stxkind, d.stxdndistinct +SELECT s.stxkind, replace(d.stxdndistinct, '}, ', E'},\n') AS stxdndistinct FROM pg_statistic_ext s, pg_statistic_ext_data d WHERE s.stxrelid = 'ndistinct'::regclass AND d.stxoid = s.oid; @@ -341,7 +367,7 @@ SELECT * FROM check_estimated_rows('SELECT COUNT(*) FROM ndistinct GROUP BY a, ( DROP STATISTICS s10; -SELECT s.stxkind, d.stxdndistinct +SELECT s.stxkind, replace(d.stxdndistinct, '}, ', E'},\n') AS stxdndistinct FROM pg_statistic_ext s, pg_statistic_ext_data d WHERE s.stxrelid = 'ndistinct'::regclass AND d.stxoid = s.oid; @@ -376,7 +402,7 @@ CREATE STATISTICS s10 (ndistinct) ON (a+1), (b+100), (2*c) FROM ndistinct; ANALYZE ndistinct; -SELECT s.stxkind, d.stxdndistinct +SELECT s.stxkind, replace(d.stxdndistinct, '}, ', E'},\n') AS stxdndistinct FROM pg_statistic_ext s, pg_statistic_ext_data d WHERE s.stxrelid = 'ndistinct'::regclass AND d.stxoid = s.oid; @@ -400,7 +426,7 @@ CREATE STATISTICS s10 (ndistinct) ON a, b, (2*c) FROM ndistinct; ANALYZE ndistinct; -SELECT s.stxkind, d.stxdndistinct +SELECT s.stxkind, replace(d.stxdndistinct, '}, ', E'},\n') AS stxdndistinct FROM pg_statistic_ext s, pg_statistic_ext_data d WHERE s.stxrelid = 'ndistinct'::regclass AND d.stxoid = s.oid; @@ -685,7 +711,7 @@ CREATE STATISTICS func_deps_stat (dependencies) ON a, b, c FROM functional_depen ANALYZE functional_dependencies; -- print the detected dependencies -SELECT dependencies FROM pg_stats_ext WHERE statistics_name = 'func_deps_stat'; +SELECT replace(dependencies, '}, ', E'},\n') AS dependencies FROM pg_stats_ext WHERE statistics_name = 'func_deps_stat'; SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE a = 1 AND b = ''1'''); @@ -821,7 +847,7 @@ CREATE STATISTICS func_deps_stat (dependencies) ON (a * 2), upper(b), (c + 1) FR ANALYZE functional_dependencies; -- print the detected dependencies -SELECT dependencies FROM pg_stats_ext WHERE statistics_name = 'func_deps_stat'; +SELECT replace(dependencies, '}, ', E'},\n') AS dependencies FROM pg_stats_ext WHERE statistics_name = 'func_deps_stat'; SELECT * FROM check_estimated_rows('SELECT * FROM functional_dependencies WHERE (a * 2) = 2 AND upper(b) = ''1'''); @@ -1599,18 +1625,28 @@ insert into stts_t1 select i,i from generate_series(1,100) i; analyze stts_t1; set search_path to public, stts_s1, stts_s2, tststats; +-- \dX +-- \dX stts_t* +-- \dX *stts_hoge +-- \dX+ +-- \dX+ stts_t* +-- \dX+ *stts_hoge +-- \dX+ stts_s2.stts_yama create statistics (mcv) ON a, b, (a+b), (a-b) FROM stts_t1; create statistics (mcv) ON a, b, (a+b), (a-b) FROM stts_t1; create statistics (mcv) ON (a+b), (a-b) FROM stts_t1; +-- \dX stts_t*expr* drop statistics stts_t1_a_b_expr_expr_stat; drop statistics stts_t1_a_b_expr_expr_stat1; drop statistics stts_t1_expr_expr_stat; set search_path to public, stts_s1; +-- \dX create role regress_stats_ext nosuperuser; set role regress_stats_ext; +-- \dX reset role; drop table stts_t1, stts_t2, stts_t3; @@ -1634,8 +1670,15 @@ CREATE FUNCTION op_leak(int, int) RETURNS bool LANGUAGE plpgsql; CREATE OPERATOR <<< (procedure = op_leak, leftarg = int, rightarg = int, restrict = scalarltsel); +CREATE FUNCTION op_leak(record, record) RETURNS bool + AS 'BEGIN RAISE NOTICE ''op_leak => %, %'', $1, $2; RETURN $1 < $2; END' + LANGUAGE plpgsql; +CREATE OPERATOR <<< (procedure = 
op_leak, leftarg = record, rightarg = record, + restrict = scalarltsel); SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Permission denied -SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 OR b <<< 0; +SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 OR b <<< 0; -- Permission denied +SELECT * FROM tststats.priv_test_tbl t + WHERE a <<< 0 AND (b <<< 0 OR t.* <<< (1, 1) IS NOT NULL); -- Permission denied DELETE FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Permission denied -- Grant access via a security barrier view, but hide all data @@ -1648,19 +1691,51 @@ GRANT SELECT, DELETE ON tststats.priv_test_view TO regress_stats_user1; SET SESSION AUTHORIZATION regress_stats_user1; SELECT * FROM tststats.priv_test_view WHERE a <<< 0 AND b <<< 0; -- Should not leak SELECT * FROM tststats.priv_test_view WHERE a <<< 0 OR b <<< 0; -- Should not leak +SELECT * FROM tststats.priv_test_view t + WHERE a <<< 0 AND (b <<< 0 OR t.* <<< (1, 1) IS NOT NULL); -- Should not leak DELETE FROM tststats.priv_test_view WHERE a <<< 0 AND b <<< 0; -- Should not leak -- Grant table access, but hide all data with RLS RESET SESSION AUTHORIZATION; ALTER TABLE tststats.priv_test_tbl ENABLE ROW LEVEL SECURITY; +CREATE POLICY priv_test_tbl_pol ON tststats.priv_test_tbl USING (2 * a < 0); GRANT SELECT, DELETE ON tststats.priv_test_tbl TO regress_stats_user1; -- Should now have direct table access, but see nothing and leak nothing SET SESSION AUTHORIZATION regress_stats_user1; SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Should not leak -SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 OR b <<< 0; +SELECT * FROM tststats.priv_test_tbl WHERE a <<< 0 OR b <<< 0; -- Should not leak +SELECT * FROM tststats.priv_test_tbl t + WHERE a <<< 0 AND (b <<< 0 OR t.* <<< (1, 1) IS NOT NULL); -- Should not leak DELETE FROM tststats.priv_test_tbl WHERE a <<< 0 AND b <<< 0; -- Should not leak +-- Create plain inheritance parent table with no access permissions +RESET SESSION AUTHORIZATION; +CREATE TABLE tststats.priv_test_parent_tbl (a int, b int); +ALTER TABLE tststats.priv_test_tbl INHERIT tststats.priv_test_parent_tbl; + +-- Should not have access to parent, and should leak nothing +SET SESSION AUTHORIZATION regress_stats_user1; +SELECT * FROM tststats.priv_test_parent_tbl WHERE a <<< 0 AND b <<< 0; -- Permission denied +SELECT * FROM tststats.priv_test_parent_tbl WHERE a <<< 0 OR b <<< 0; -- Permission denied +SELECT * FROM tststats.priv_test_parent_tbl t + WHERE a <<< 0 AND (b <<< 0 OR t.* <<< (1, 1) IS NOT NULL); -- Permission denied +DELETE FROM tststats.priv_test_parent_tbl WHERE a <<< 0 AND b <<< 0; -- Permission denied + +-- Grant table access to parent, but hide all data with RLS +RESET SESSION AUTHORIZATION; +ALTER TABLE tststats.priv_test_parent_tbl ENABLE ROW LEVEL SECURITY; +CREATE POLICY priv_test_parent_tbl_pol ON tststats.priv_test_parent_tbl USING (2 * a < 0); +GRANT SELECT, DELETE ON tststats.priv_test_parent_tbl TO regress_stats_user1; + +-- Should now have direct table access to parent, but see nothing and leak nothing +SET SESSION AUTHORIZATION regress_stats_user1; +SELECT * FROM tststats.priv_test_parent_tbl WHERE a <<< 0 AND b <<< 0; -- Should not leak +SELECT * FROM tststats.priv_test_parent_tbl WHERE a <<< 0 OR b <<< 0; -- Should not leak +SELECT * FROM tststats.priv_test_parent_tbl t + WHERE a <<< 0 AND (b <<< 0 OR t.* <<< (1, 1) IS NOT NULL); -- Should not leak +DELETE FROM tststats.priv_test_parent_tbl WHERE a <<< 0 AND b <<< 0; -- Should not leak + -- 
privilege checks for pg_stats_ext and pg_stats_ext_exprs RESET SESSION AUTHORIZATION; CREATE TABLE stats_ext_tbl (id INT PRIMARY KEY GENERATED BY DEFAULT AS IDENTITY, col TEXT); @@ -1687,12 +1762,48 @@ SELECT statistics_name, most_common_vals FROM pg_stats_ext x SELECT statistics_name, most_common_vals FROM pg_stats_ext_exprs x WHERE tablename = 'stats_ext_tbl' ORDER BY ROW(x.*); +-- CREATE STATISTICS checks for CREATE on the schema +RESET SESSION AUTHORIZATION; +CREATE SCHEMA sts_sch1 CREATE TABLE sts_sch1.tbl (a INT, b INT, c INT GENERATED ALWAYS AS (b * 2) STORED); +CREATE SCHEMA sts_sch2; +GRANT USAGE ON SCHEMA sts_sch1, sts_sch2 TO regress_stats_user1; +ALTER TABLE sts_sch1.tbl OWNER TO regress_stats_user1; +SET SESSION AUTHORIZATION regress_stats_user1; +CREATE STATISTICS ON a, b, c FROM sts_sch1.tbl; +CREATE STATISTICS sts_sch2.fail ON a, b, c FROM sts_sch1.tbl; +RESET SESSION AUTHORIZATION; +GRANT CREATE ON SCHEMA sts_sch1 TO regress_stats_user1; +SET SESSION AUTHORIZATION regress_stats_user1; +CREATE STATISTICS ON a, b, c FROM sts_sch1.tbl; +CREATE STATISTICS sts_sch2.fail ON a, b, c FROM sts_sch1.tbl; +RESET SESSION AUTHORIZATION; +REVOKE CREATE ON SCHEMA sts_sch1 FROM regress_stats_user1; +GRANT CREATE ON SCHEMA sts_sch2 TO regress_stats_user1; +SET SESSION AUTHORIZATION regress_stats_user1; +CREATE STATISTICS ON a, b, c FROM sts_sch1.tbl; +CREATE STATISTICS sts_sch2.pass1 ON a, b, c FROM sts_sch1.tbl; +RESET SESSION AUTHORIZATION; +GRANT CREATE ON SCHEMA sts_sch1, sts_sch2 TO regress_stats_user1; +SET SESSION AUTHORIZATION regress_stats_user1; +CREATE STATISTICS ON a, b, c FROM sts_sch1.tbl; +CREATE STATISTICS sts_sch2.pass2 ON a, b, c FROM sts_sch1.tbl; + +-- re-creating statistics via ALTER TABLE bypasses checks for CREATE on schema +RESET SESSION AUTHORIZATION; +REVOKE CREATE ON SCHEMA sts_sch1, sts_sch2 FROM regress_stats_user1; +SET SESSION AUTHORIZATION regress_stats_user1; +ALTER TABLE sts_sch1.tbl ALTER COLUMN a TYPE SMALLINT; +ALTER TABLE sts_sch1.tbl ALTER COLUMN c SET EXPRESSION AS (a * 3); + -- Tidy up DROP OPERATOR <<< (int, int); DROP FUNCTION op_leak(int, int); +DROP OPERATOR <<< (record, record); +DROP FUNCTION op_leak(record, record); RESET SESSION AUTHORIZATION; DROP TABLE stats_ext_tbl; DROP SCHEMA tststats CASCADE; +DROP SCHEMA sts_sch1, sts_sch2 CASCADE; DROP USER regress_stats_user1; CREATE TABLE grouping_unique (x integer); @@ -1745,4 +1856,13 @@ SELECT FROM sb_1 LEFT JOIN sb_2 RESET enable_nestloop; RESET enable_mergejoin; +-- Check that we can use statistics on a bool-valued function. +SELECT * FROM check_estimated_rows('SELECT * FROM sb_2 WHERE numeric_lt(y, 1.0)'); + +CREATE STATISTICS extstat_sb_2_small ON numeric_lt(y, 1.0) FROM sb_2; +ANALYZE sb_2; + +SELECT * FROM check_estimated_rows('SELECT * FROM sb_2 WHERE numeric_lt(y, 1.0)'); + +-- Tidy up DROP TABLE sb_1, sb_2 CASCADE; diff --git a/crates/squawk_parser/tests/data/regression_suite/stats_rewrite.sql b/crates/squawk_parser/tests/data/regression_suite/stats_rewrite.sql new file mode 100644 index 00000000..bbe41db5 --- /dev/null +++ b/crates/squawk_parser/tests/data/regression_suite/stats_rewrite.sql @@ -0,0 +1,220 @@ +-- +-- Test cumulative statistics with relation rewrites +-- + +-- Two-phase commit. +-- Table-level stats with VACUUM and rewrite after 2PC commit. 
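+-- Descriptive aside: the point of the PREPARE TRANSACTION / COMMIT PREPARED
+-- pairs below is that a prepared transaction's pending stats updates are
+-- carried with the 2PC state and applied only when it is resolved, so
+-- rewrites inside 2PC exercise a different commit path than plain COMMIT.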
+CREATE TABLE test_2pc_timestamp (a int) WITH (autovacuum_enabled = false); +VACUUM ANALYZE test_2pc_timestamp; +SELECT last_analyze AS last_vacuum_analyze + FROM pg_stat_all_tables WHERE relname = 'test_2pc_timestamp' /* \gset */; +BEGIN; +ALTER TABLE test_2pc_timestamp ALTER COLUMN a TYPE int; +PREPARE TRANSACTION 'test'; +COMMIT PREPARED 'test'; +SELECT pg_stat_force_next_flush(); +SELECT last_analyze = 'last_vacuum_analyze'::timestamptz AS same_vacuum_ts + FROM pg_stat_all_tables WHERE relname = 'test_2pc_timestamp'; +DROP TABLE test_2pc_timestamp; + +-- Table-level stats with single rewrite after 2PC commit. +CREATE TABLE test_2pc_rewrite_alone (a int) WITH (autovacuum_enabled = false); +INSERT INTO test_2pc_rewrite_alone VALUES (1); +BEGIN; +ALTER TABLE test_2pc_rewrite_alone ALTER COLUMN a TYPE bigint; +PREPARE TRANSACTION 'test'; +COMMIT PREPARED 'test'; +SELECT pg_stat_force_next_flush(); +SELECT n_tup_ins, n_live_tup, n_dead_tup + FROM pg_stat_all_tables WHERE relname = 'test_2pc_rewrite_alone'; +DROP TABLE test_2pc_rewrite_alone; + +-- Table-level stats with rewrite and DMLs after 2PC commit. +CREATE TABLE test_2pc (a int) WITH (autovacuum_enabled = false); +INSERT INTO test_2pc VALUES (1); +BEGIN; +INSERT INTO test_2pc VALUES (1); +INSERT INTO test_2pc VALUES (2); +INSERT INTO test_2pc VALUES (3); +ALTER TABLE test_2pc ALTER COLUMN a TYPE bigint; +PREPARE TRANSACTION 'test'; +COMMIT PREPARED 'test'; +SELECT pg_stat_force_next_flush(); +SELECT n_tup_ins, n_live_tup, n_dead_tup + FROM pg_stat_all_tables WHERE relname = 'test_2pc'; +DROP TABLE test_2pc; + +-- Table-level stats with multiple rewrites after 2PC commit. +CREATE TABLE test_2pc_multi (a int) WITH (autovacuum_enabled = false); +INSERT INTO test_2pc_multi VALUES (1); +BEGIN; +INSERT INTO test_2pc_multi VALUES (1); +INSERT INTO test_2pc_multi VALUES (2); +ALTER TABLE test_2pc_multi ALTER COLUMN a TYPE bigint; +INSERT INTO test_2pc_multi VALUES (3); +INSERT INTO test_2pc_multi VALUES (4); +ALTER TABLE test_2pc_multi ALTER COLUMN a TYPE int; +INSERT INTO test_2pc_multi VALUES (5); +PREPARE TRANSACTION 'test'; +COMMIT PREPARED 'test'; +SELECT pg_stat_force_next_flush(); +SELECT n_tup_ins, n_live_tup, n_dead_tup + FROM pg_stat_all_tables WHERE relname = 'test_2pc_multi'; +DROP TABLE test_2pc_multi; + +-- Table-level stats with single rewrite after 2PC abort. +CREATE TABLE test_2pc_rewrite_alone_abort (a int) WITH (autovacuum_enabled = false); +INSERT INTO test_2pc_rewrite_alone_abort VALUES (1); +BEGIN; +ALTER TABLE test_2pc_rewrite_alone_abort ALTER COLUMN a TYPE bigint; +PREPARE TRANSACTION 'test'; +ROLLBACK PREPARED 'test'; +SELECT pg_stat_force_next_flush(); +SELECT n_tup_ins, n_live_tup, n_dead_tup + FROM pg_stat_all_tables WHERE relname = 'test_2pc_rewrite_alone_abort'; +DROP TABLE test_2pc_rewrite_alone_abort; + +-- Table-level stats with rewrite and DMLs after 2PC abort. +CREATE TABLE test_2pc_abort (a int) WITH (autovacuum_enabled = false); +INSERT INTO test_2pc_abort VALUES (1); +BEGIN; +INSERT INTO test_2pc_abort VALUES (1); +INSERT INTO test_2pc_abort VALUES (2); +ALTER TABLE test_2pc_abort ALTER COLUMN a TYPE bigint; +INSERT INTO test_2pc_abort VALUES (3); +PREPARE TRANSACTION 'test'; +ROLLBACK PREPARED 'test'; +SELECT pg_stat_force_next_flush(); +SELECT n_tup_ins, n_live_tup, n_dead_tup + FROM pg_stat_all_tables WHERE relname = 'test_2pc_abort'; +DROP TABLE test_2pc_abort; + +-- Table-level stats with rewrites and subtransactions after 2PC commit. 
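+-- Reading the savepoint script below: ROLLBACK TO SAVEPOINT b discards
+-- everything after SAVEPOINT b (the insert of 3, the rewrite back to int,
+-- and the inserts of 4 and 5 under savepoint c), so only the first rewrite
+-- and the inserts of 1 and 2 survive to COMMIT PREPARED.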
+CREATE TABLE test_2pc_savepoint (a int) WITH (autovacuum_enabled = false); +INSERT INTO test_2pc_savepoint VALUES (1); +BEGIN; +SAVEPOINT a; +INSERT INTO test_2pc_savepoint VALUES (1); +INSERT INTO test_2pc_savepoint VALUES (2); +ALTER TABLE test_2pc_savepoint ALTER COLUMN a TYPE bigint; +SAVEPOINT b; +INSERT INTO test_2pc_savepoint VALUES (3); +ALTER TABLE test_2pc_savepoint ALTER COLUMN a TYPE int; +SAVEPOINT c; +INSERT INTO test_2pc_savepoint VALUES (4); +INSERT INTO test_2pc_savepoint VALUES (5); +ROLLBACK TO SAVEPOINT b; +PREPARE TRANSACTION 'test'; +COMMIT PREPARED 'test'; +SELECT pg_stat_force_next_flush(); +SELECT n_tup_ins, n_live_tup, n_dead_tup + FROM pg_stat_all_tables WHERE relname = 'test_2pc_savepoint'; +DROP TABLE test_2pc_savepoint; + +-- Table-level stats with single rewrite and VACUUM +CREATE TABLE test_timestamp (a int) WITH (autovacuum_enabled = false); +VACUUM ANALYZE test_timestamp; +SELECT last_analyze AS last_vacuum_analyze + FROM pg_stat_all_tables WHERE relname = 'test_timestamp' /* \gset */; +ALTER TABLE test_timestamp ALTER COLUMN a TYPE bigint; +SELECT pg_stat_force_next_flush(); +SELECT last_analyze = 'last_vacuum_analyze'::timestamptz AS same_vacuum_ts + FROM pg_stat_all_tables WHERE relname = 'test_timestamp'; +DROP TABLE test_timestamp; + +-- Table-level stats with single rewrite. +CREATE TABLE test_alone (a int) WITH (autovacuum_enabled = false); +INSERT INTO test_alone VALUES (1); +BEGIN; +ALTER TABLE test_alone ALTER COLUMN a TYPE bigint; +COMMIT; +SELECT pg_stat_force_next_flush(); +SELECT n_tup_ins, n_live_tup, n_dead_tup + FROM pg_stat_all_tables WHERE relname = 'test_alone'; +DROP TABLE test_alone; + +-- Table-level stats with rewrite and DMLs. +CREATE TABLE test (a int) WITH (autovacuum_enabled = false); +INSERT INTO test VALUES (1); +BEGIN; +INSERT INTO test VALUES (1); +INSERT INTO test VALUES (2); +INSERT INTO test VALUES (3); +ALTER TABLE test ALTER COLUMN a TYPE bigint; +COMMIT; +SELECT pg_stat_force_next_flush(); +SELECT n_tup_ins, n_live_tup, n_dead_tup + FROM pg_stat_all_tables WHERE relname = 'test'; +DROP TABLE test; + +-- Table-level stats with multiple rewrites and DMLs. +CREATE TABLE test_multi (a int) WITH (autovacuum_enabled = false); +INSERT INTO test_multi VALUES (1); +BEGIN; +INSERT INTO test_multi VALUES (1); +INSERT INTO test_multi VALUES (2); +ALTER TABLE test_multi ALTER COLUMN a TYPE bigint; +INSERT INTO test_multi VALUES (3); +INSERT INTO test_multi VALUES (4); +ALTER TABLE test_multi ALTER COLUMN a TYPE int; +INSERT INTO test_multi VALUES (5); +COMMIT; +SELECT pg_stat_force_next_flush(); +SELECT n_tup_ins, n_live_tup, n_dead_tup + FROM pg_stat_all_tables WHERE relname = 'test_multi'; +DROP TABLE test_multi; + +-- Table-level stats with rewrite and rollback. +CREATE TABLE test_rewrite_alone_abort (a int) WITH (autovacuum_enabled = false); +INSERT INTO test_rewrite_alone_abort VALUES (1); +BEGIN; +ALTER TABLE test_rewrite_alone_abort ALTER COLUMN a TYPE bigint; +ROLLBACK; +SELECT pg_stat_force_next_flush(); +SELECT n_tup_ins, n_live_tup, n_dead_tup + FROM pg_stat_all_tables WHERE relname = 'test_rewrite_alone_abort'; +DROP TABLE test_rewrite_alone_abort; + +-- Table-level stats with rewrite, DMLs and rollback. 
+CREATE TABLE test_abort (a int) WITH (autovacuum_enabled = false); +INSERT INTO test_abort VALUES (1); +BEGIN; +INSERT INTO test_abort VALUES (1); +INSERT INTO test_abort VALUES (2); +ALTER TABLE test_abort ALTER COLUMN a TYPE bigint; +INSERT INTO test_abort VALUES (3); +ROLLBACK; +SELECT pg_stat_force_next_flush(); +SELECT n_tup_ins, n_live_tup, n_dead_tup + FROM pg_stat_all_tables WHERE relname = 'test_abort'; +DROP TABLE test_abort; + +-- Table-level stats with rewrites and subtransactions. +CREATE TABLE test_savepoint (a int) WITH (autovacuum_enabled = false); +INSERT INTO test_savepoint VALUES (1); +BEGIN; +SAVEPOINT a; +INSERT INTO test_savepoint VALUES (1); +INSERT INTO test_savepoint VALUES (2); +ALTER TABLE test_savepoint ALTER COLUMN a TYPE bigint; +SAVEPOINT b; +INSERT INTO test_savepoint VALUES (3); +ALTER TABLE test_savepoint ALTER COLUMN a TYPE int; +SAVEPOINT c; +INSERT INTO test_savepoint VALUES (4); +INSERT INTO test_savepoint VALUES (5); +ROLLBACK TO SAVEPOINT b; +COMMIT; +SELECT pg_stat_force_next_flush(); +SELECT n_tup_ins, n_live_tup, n_dead_tup + FROM pg_stat_all_tables WHERE relname = 'test_savepoint'; +DROP TABLE test_savepoint; + +-- Table-level stats with tablespace rewrite. +CREATE TABLE test_tbs (a int) WITH (autovacuum_enabled = false); +INSERT INTO test_tbs VALUES (1); +ALTER TABLE test_tbs SET TABLESPACE pg_default; +SELECT pg_stat_force_next_flush(); +SELECT n_tup_ins, n_live_tup, n_dead_tup + FROM pg_stat_all_tables WHERE relname = 'test_tbs'; +DROP TABLE test_tbs; diff --git a/crates/squawk_parser/tests/data/regression_suite/strings.sql b/crates/squawk_parser/tests/data/regression_suite/strings.sql index 5ea3ba3f..b86dc6eb 100644 --- a/crates/squawk_parser/tests/data/regression_suite/strings.sql +++ b/crates/squawk_parser/tests/data/regression_suite/strings.sql @@ -10,11 +10,11 @@ SELECT 'first line' ' - third line' AS "Three lines to one"; --- -- illegal string continuation syntax --- SELECT 'first line' --- ' - next line' /* this comment is not allowed here */ --- ' - third line' --- AS "Illegal comment within continuation"; +-- illegal string continuation syntax +SELECT 'first line' +' - next line' /* this comment is not allowed here */ +' - third line' + AS "Illegal comment within continuation"; -- Unicode escapes SET standard_conforming_strings TO on; @@ -76,11 +76,28 @@ SELECT E'De\\000dBeEf'::bytea; SELECT E'De\123dBeEf'::bytea; SELECT E'De\\123dBeEf'::bytea; SELECT E'De\\678dBeEf'::bytea; +SELECT E'DeAd\\\\BeEf'::bytea; SELECT reverse(''::bytea); SELECT reverse('\xaa'::bytea); SELECT reverse('\xabcd'::bytea); +SELECT ('\x' || repeat(' ', 32))::bytea; +SELECT ('\x' || repeat('!', 32))::bytea; +SELECT ('\x' || repeat('/', 34))::bytea; +SELECT ('\x' || repeat('0', 34))::bytea; +SELECT ('\x' || repeat('9', 32))::bytea; +SELECT ('\x' || repeat(':', 32))::bytea; +SELECT ('\x' || repeat('@', 34))::bytea; +SELECT ('\x' || repeat('A', 34))::bytea; +SELECT ('\x' || repeat('F', 32))::bytea; +SELECT ('\x' || repeat('G', 32))::bytea; +SELECT ('\x' || repeat('`', 34))::bytea; +SELECT ('\x' || repeat('a', 34))::bytea; +SELECT ('\x' || repeat('f', 32))::bytea; +SELECT ('\x' || repeat('g', 32))::bytea; +SELECT ('\x' || repeat('~', 34))::bytea; + SET bytea_output TO escape; SELECT E'\\xDeAdBeEf'::bytea; SELECT E'\\x De Ad Be Ef '::bytea; @@ -88,6 +105,7 @@ SELECT E'\\xDe00BeEf'::bytea; SELECT E'DeAdBeEf'::bytea; SELECT E'De\\000dBeEf'::bytea; SELECT E'De\\123dBeEf'::bytea; +SELECT E'DeAd\\\\BeEf'::bytea; -- Test non-error-throwing API too SELECT 
pg_input_is_valid(E'\\xDeAdBeE', 'bytea'); @@ -197,6 +215,29 @@ SELECT 'abcd\efg' SIMILAR TO '_bcd\%' ESCAPE '' AS true; SELECT 'abcdefg' SIMILAR TO '_bcd%' ESCAPE NULL AS null; SELECT 'abcdefg' SIMILAR TO '_bcd#%' ESCAPE '##' AS error; +-- Characters that should be left alone in character classes when a +-- SIMILAR TO regexp pattern is converted to POSIX style. +-- Underscore "_" +EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '_[_[:alpha:]_]_'; +-- Percentage "%" +EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '%[%[:alnum:]%]%'; +-- Dot "." +EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '.[.[:alnum:].].'; +-- Dollar "$" +EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '$[$[:alnum:]$]$'; +-- Opening parenthesis "(" +EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '()[([:alnum:](]()'; +-- Caret "^" +EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '^[^[:alnum:]^[^^][[^^]][\^][[\^]]\^]^'; +-- Closing square bracket "]" at the beginning of character class +EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '[]%][^]%][^%]%'; +-- Closing square bracket effective after two carets at the beginning +-- of character class. +EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '[^^]^'; +-- Closing square bracket after an escape sequence at the beginning of +-- a character class closes the character class. +EXPLAIN (COSTS OFF) SELECT * FROM TEXT_TBL WHERE f1 SIMILAR TO '[|a]%' ESCAPE '|'; + -- Test backslash escapes in regexp_replace's replacement string SELECT regexp_replace('1112223333', E'(\\d{3})(\\d{3})(\\d{4})', E'(\\1) \\2-\\3'); SELECT regexp_replace('foobarrbazz', E'(.)\\1', E'X\\&Y', 'g'); @@ -298,6 +339,7 @@ SELECT regexp_substr('abcabcabc', 'a.c', 1, 1, 'g'); SELECT regexp_substr('abcabcabc', 'a.c', 1, 1, '', -1); -- set so we can tell NULL from empty string +-- \pset null '\\N' -- return all matches from regexp SELECT regexp_matches('foobarbequebaz', $re$(bar)(beque)$re$); @@ -359,6 +401,7 @@ SELECT foo, length(foo) FROM regexp_split_to_table('thE QUick bROWn FOx jUMPs ov SELECT regexp_split_to_array('thE QUick bROWn FOx jUMPs ovEr The lazy dOG', 'e', 'g'); -- change NULL-display back +-- \pset null '' -- E021-11 position expression SELECT POSITION('4' IN '1234567890') = '4' AS "4"; @@ -626,6 +669,26 @@ SELECT length(c), c::text FROM toasttest; SELECT c FROM toasttest; DROP TABLE toasttest; +-- test with short varlenas (up to 126 data bytes reduced to a 1-byte header) +-- being toasted. +CREATE TABLE toasttest (f1 text, f2 text); +ALTER TABLE toasttest SET (toast_tuple_target = 128); +ALTER TABLE toasttest ALTER COLUMN f1 SET STORAGE EXTERNAL; +ALTER TABLE toasttest ALTER COLUMN f2 SET STORAGE EXTERNAL; +-- Here, the first value is a varlena large enough to make it toasted and +-- stored uncompressed. The second value is a short varlena, toasted +-- and stored uncompressed. +INSERT INTO toasttest values(repeat('1234', 1000), repeat('5678', 30)); +SELECT reltoastrelid::regclass AS reltoastname FROM pg_class + WHERE oid = 'toasttest'::regclass /* \gset */; +-- There should be two values inserted in the toast relation.
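+-- Descriptive aside: toast chunks are keyed by (chunk_id, chunk_seq), so
+-- counting rows with chunk_seq = 0 counts one row per toasted value. In the
+-- upstream psql script the table name is interpolated from the \gset result
+-- (FROM :reltoastname); the bare identifier is kept here for the parser.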
+SELECT count(*) FROM reltoastname WHERE chunk_seq = 0; +SELECT substr(f1, 5, 10) AS f1_data, substr(f2, 5, 10) AS f2_data + FROM toasttest; +SELECT pg_column_compression(f1) AS f1_comp, pg_column_compression(f2) AS f2_comp + FROM toasttest; +DROP TABLE toasttest; + -- -- test length -- @@ -752,6 +815,64 @@ SELECT decode(encode(('\x' || repeat('1234567890abcdef0001', 7))::bytea, SELECT encode('\x1234567890abcdef00', 'escape'); SELECT decode(encode('\x1234567890abcdef00', 'escape'), 'escape'); +-- report an error with a hint listing valid encodings when an invalid encoding is specified +SELECT encode('\x01'::bytea, 'invalid'); -- error +SELECT decode('00', 'invalid'); -- error + +-- +-- base64url encoding/decoding +-- +SET bytea_output TO hex; + +-- Simple encoding/decoding +SELECT encode('\x69b73eff', 'base64url'); -- abc-_w +SELECT decode('abc-_w', 'base64url'); -- \x69b73eff + +-- Round-trip: decode(encode(x)) = x +SELECT decode(encode('\x1234567890abcdef00', 'base64url'), 'base64url'); -- \x1234567890abcdef00 + +-- Empty input +SELECT encode('', 'base64url'); -- '' +SELECT decode('', 'base64url'); -- '' + +-- 1 byte input +SELECT encode('\x01', 'base64url'); -- AQ +SELECT decode('AQ', 'base64url'); -- \x01 + +-- 2 byte input +SELECT encode('\x0102'::bytea, 'base64url'); -- AQI +SELECT decode('AQI', 'base64url'); -- \x0102 + +-- 3 byte input (no padding needed) +SELECT encode('\x010203'::bytea, 'base64url'); -- AQID +SELECT decode('AQID', 'base64url'); -- \x010203 + +-- 4 byte input (results in 6 base64 chars) +SELECT encode('\xdeadbeef'::bytea, 'base64url'); -- 3q2-7w +SELECT decode('3q2-7w', 'base64url'); -- \xdeadbeef + +-- Round-trip test for all lengths from 0–4 +SELECT encode(decode(encode(E'\\x', 'base64url'), 'base64url'), 'base64url'); +SELECT encode(decode(encode(E'\\x00', 'base64url'), 'base64url'), 'base64url'); +SELECT encode(decode(encode(E'\\x0001', 'base64url'), 'base64url'), 'base64url'); +SELECT encode(decode(encode(E'\\x000102', 'base64url'), 'base64url'), 'base64url'); +SELECT encode(decode(encode(E'\\x00010203', 'base64url'), 'base64url'), 'base64url'); + +-- Invalid inputs (should ERROR) +-- invalid character '@' +SELECT decode('QQ@=', 'base64url'); + +-- missing characters (incomplete group) +SELECT decode('QQ', 'base64url'); -- ok (1 byte) +SELECT decode('QQI', 'base64url'); -- ok (2 bytes) +SELECT decode('QQIDQ', 'base64url'); -- ERROR: invalid base64url end sequence + +-- unexpected '=' at start +SELECT decode('=QQQ', 'base64url'); + +-- valid base64 padding in base64url (optional, but accepted) +SELECT decode('abc-_w==', 'base64url'); -- should decode to \x69b73eff + -- -- get_bit/set_bit etc -- @@ -817,7 +938,7 @@ set standard_conforming_strings = off; set escape_string_warning = off; set standard_conforming_strings = on; --- select 'a\bcd' as f1, 'a\b''cd' as f2, 'a\b''''cd' as f3, 'abcd\' as f4, 'ab\''cd' as f5, '\\' as f6; +select 'a\bcd' as f1, 'a\b''cd' as f2, 'a\b''''cd' as f3, 'abcd\' as f4, 'ab\''cd' as f5, '\\' as f6; set standard_conforming_strings = off; diff --git a/crates/squawk_parser/tests/data/regression_suite/subscription.sql b/crates/squawk_parser/tests/data/regression_suite/subscription.sql index e5706c1c..e6fe5637 100644 --- a/crates/squawk_parser/tests/data/regression_suite/subscription.sql +++ b/crates/squawk_parser/tests/data/regression_suite/subscription.sql @@ -38,7 +38,7 @@ SELECT pg_stat_reset_subscription_stats(oid) FROM pg_subscription WHERE subname SELECT subname, stats_reset IS NULL stats_reset_is_null FROM 
pg_stat_subscription_stats WHERE subname = 'regress_testsub'; -- Reset the stats again and check if the new reset_stats is updated. -SELECT stats_reset as prev_stats_reset FROM pg_stat_subscription_stats WHERE subname = 'regress_testsub' ; +SELECT stats_reset as prev_stats_reset FROM pg_stat_subscription_stats WHERE subname = 'regress_testsub' /* \gset */; SELECT pg_stat_reset_subscription_stats(oid) FROM pg_subscription WHERE subname = 'regress_testsub'; SELECT 'prev_stats_reset' < stats_reset FROM pg_stat_subscription_stats WHERE subname = 'regress_testsub'; @@ -71,7 +71,9 @@ CREATE SUBSCRIPTION regress_testsub4 CONNECTION 'dbname=regress_doesnotexist' PU -- now it works CREATE SUBSCRIPTION regress_testsub4 CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (slot_name = NONE, connect = false, origin = none); +-- \dRs+ regress_testsub4 ALTER SUBSCRIPTION regress_testsub4 SET (origin = any); +-- \dRs+ regress_testsub4 DROP SUBSCRIPTION regress_testsub3; DROP SUBSCRIPTION regress_testsub4; @@ -86,12 +88,14 @@ CREATE SUBSCRIPTION regress_testsub5 CONNECTION 'port=-1' PUBLICATION testpub; -- fail - invalid connection string during ALTER ALTER SUBSCRIPTION regress_testsub CONNECTION 'foobar'; +-- \dRs+ ALTER SUBSCRIPTION regress_testsub SET PUBLICATION testpub2, testpub3 WITH (refresh = false); ALTER SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist2'; ALTER SUBSCRIPTION regress_testsub SET (slot_name = 'newname'); ALTER SUBSCRIPTION regress_testsub SET (password_required = false); ALTER SUBSCRIPTION regress_testsub SET (run_as_owner = true); +-- \dRs+ ALTER SUBSCRIPTION regress_testsub SET (password_required = true); ALTER SUBSCRIPTION regress_testsub SET (run_as_owner = false); @@ -106,6 +110,7 @@ ALTER SUBSCRIPTION regress_testsub SET (create_slot = false); -- ok ALTER SUBSCRIPTION regress_testsub SKIP (lsn = '0/12345'); +-- \dRs+ -- ok - with lsn = NONE ALTER SUBSCRIPTION regress_testsub SKIP (lsn = NONE); @@ -113,13 +118,16 @@ ALTER SUBSCRIPTION regress_testsub SKIP (lsn = NONE); -- fail ALTER SUBSCRIPTION regress_testsub SKIP (lsn = '0/0'); +-- \dRs+ BEGIN; ALTER SUBSCRIPTION regress_testsub ENABLE; +-- \dRs ALTER SUBSCRIPTION regress_testsub DISABLE; +-- \dRs COMMIT; @@ -132,6 +140,7 @@ ALTER SUBSCRIPTION regress_testsub RENAME TO regress_testsub_foo; ALTER SUBSCRIPTION regress_testsub_foo SET (synchronous_commit = local); ALTER SUBSCRIPTION regress_testsub_foo SET (synchronous_commit = foobar); +-- \dRs+ -- rename back to keep the rest simple ALTER SUBSCRIPTION regress_testsub_foo RENAME TO regress_testsub; @@ -160,10 +169,12 @@ CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUB -- now it works CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, binary = true); +-- \dRs+ ALTER SUBSCRIPTION regress_testsub SET (binary = false); ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE); +-- \dRs+ DROP SUBSCRIPTION regress_testsub; @@ -173,13 +184,16 @@ CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUB -- now it works CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, streaming = true); +-- \dRs+ ALTER SUBSCRIPTION regress_testsub SET (streaming = parallel); +-- \dRs+ ALTER SUBSCRIPTION regress_testsub SET (streaming = false); ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE); +-- \dRs+ -- fail - publication already exists ALTER SUBSCRIPTION 
regress_testsub ADD PUBLICATION testpub WITH (refresh = false); @@ -193,6 +207,7 @@ ALTER SUBSCRIPTION regress_testsub ADD PUBLICATION testpub1, testpub2 WITH (refr -- fail - publications already exist ALTER SUBSCRIPTION regress_testsub ADD PUBLICATION testpub1, testpub2 WITH (refresh = false); +-- \dRs+ -- fail - publication used more than once ALTER SUBSCRIPTION regress_testsub DROP PUBLICATION testpub1, testpub1 WITH (refresh = false); @@ -206,6 +221,7 @@ ALTER SUBSCRIPTION regress_testsub DROP PUBLICATION testpub3 WITH (refresh = fal -- ok - delete publications ALTER SUBSCRIPTION regress_testsub DROP PUBLICATION testpub1, testpub2 WITH (refresh = false); +-- \dRs+ DROP SUBSCRIPTION regress_testsub; @@ -239,9 +255,11 @@ CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUB -- now it works CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, two_phase = true); +-- \dRs+ -- we can alter streaming when two_phase enabled ALTER SUBSCRIPTION regress_testsub SET (streaming = true); +-- \dRs+ ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE); DROP SUBSCRIPTION regress_testsub; @@ -249,6 +267,7 @@ DROP SUBSCRIPTION regress_testsub; -- two_phase and streaming are compatible. CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, streaming = true, two_phase = true); +-- \dRs+ ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE); DROP SUBSCRIPTION regress_testsub; @@ -259,9 +278,38 @@ CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUB -- now it works CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, disable_on_error = false); +-- \dRs+ ALTER SUBSCRIPTION regress_testsub SET (disable_on_error = true); +-- \dRs+ + +ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE); +DROP SUBSCRIPTION regress_testsub; + +-- fail - retain_dead_tuples must be boolean +CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, retain_dead_tuples = foo); + +-- ok +CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, retain_dead_tuples = false); + +-- \dRs+ + +ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE); +DROP SUBSCRIPTION regress_testsub; + +-- fail - max_retention_duration must be integer +CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, max_retention_duration = foo); + +-- ok +CREATE SUBSCRIPTION regress_testsub CONNECTION 'dbname=regress_doesnotexist' PUBLICATION testpub WITH (connect = false, max_retention_duration = 1000); + +-- \dRs+ + +-- ok +ALTER SUBSCRIPTION regress_testsub SET (max_retention_duration = 0); + +-- \dRs+ ALTER SUBSCRIPTION regress_testsub SET (slot_name = NONE); DROP SUBSCRIPTION regress_testsub; diff --git a/crates/squawk_parser/tests/data/regression_suite/subselect.sql b/crates/squawk_parser/tests/data/regression_suite/subselect.sql index a2ff4a08..881c88b6 100644 --- a/crates/squawk_parser/tests/data/regression_suite/subselect.sql +++ b/crates/squawk_parser/tests/data/regression_suite/subselect.sql @@ -110,6 +110,7 @@ SELECT * FROM (SELECT * FROM (SELECT abs(f1) AS a1 FROM int4_tbl)), SELECT * FROM view_unnamed_ss; +-- \sv view_unnamed_ss DROP VIEW view_unnamed_ss; @@ -120,6 +121,7 @@ SELECT * FROM (SELECT * 
FROM int4_tbl), int8_tbl AS unnamed_subquery WHERE f1 = q1 FOR UPDATE OF unnamed_subquery; +-- \sv view_unnamed_ss_locking DROP VIEW view_unnamed_ss_locking; @@ -359,6 +361,75 @@ select * from float_table select * from numeric_table where num_col in (select float_col from float_table); +-- +-- Test that a semijoin implemented by unique-ifying the RHS can explore +-- different paths of the RHS rel. +-- + +create table semijoin_unique_tbl (a int, b int); +insert into semijoin_unique_tbl select i%10, i%10 from generate_series(1,1000)i; +create index on semijoin_unique_tbl(a, b); +analyze semijoin_unique_tbl; + +-- Ensure that we get a plan with Unique + IndexScan +explain (verbose, costs off) +select * from semijoin_unique_tbl t1, semijoin_unique_tbl t2 +where (t1.a, t2.a) in (select a, b from semijoin_unique_tbl t3) +order by t1.a, t2.a; + +-- Ensure that we can unique-ify expressions more complex than plain Vars +explain (verbose, costs off) +select * from semijoin_unique_tbl t1, semijoin_unique_tbl t2 +where (t1.a, t2.a) in (select a+1, b+1 from semijoin_unique_tbl t3) +order by t1.a, t2.a; + +-- encourage use of parallel plans +set parallel_setup_cost=0; +set parallel_tuple_cost=0; +set min_parallel_table_scan_size=0; +set max_parallel_workers_per_gather=4; + +set enable_indexscan to off; + +-- Ensure that we get a parallel plan for the unique-ification +explain (verbose, costs off) +select * from semijoin_unique_tbl t1, semijoin_unique_tbl t2 +where (t1.a, t2.a) in (select a, b from semijoin_unique_tbl t3) +order by t1.a, t2.a; + +reset enable_indexscan; + +reset max_parallel_workers_per_gather; +reset min_parallel_table_scan_size; +reset parallel_tuple_cost; +reset parallel_setup_cost; + +drop table semijoin_unique_tbl; + +create table unique_tbl_p (a int, b int) partition by range(a); +create table unique_tbl_p1 partition of unique_tbl_p for values from (0) to (5); +create table unique_tbl_p2 partition of unique_tbl_p for values from (5) to (10); +create table unique_tbl_p3 partition of unique_tbl_p for values from (10) to (20); +insert into unique_tbl_p select i%12, i from generate_series(0, 1000)i; +create index on unique_tbl_p1(a); +create index on unique_tbl_p2(a); +create index on unique_tbl_p3(a); +analyze unique_tbl_p; + +set enable_partitionwise_join to on; + +-- Ensure that the unique-ification works for partition-wise join +-- (Only one of the two joins will be done partitionwise, but that's good +-- enough for our purposes.) 
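+-- Descriptive aside: "unique-ification" means implementing the semijoin by
+-- first making the subquery output distinct (e.g. a Unique or HashAggregate
+-- over t3) and then joining to it as an ordinary inner join.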
+explain (verbose, costs off) +select * from unique_tbl_p t1, unique_tbl_p t2 +where (t1.a, t2.a) in (select a, a from unique_tbl_p t3) +order by t1.a, t2.a; + +reset enable_partitionwise_join; + +drop table unique_tbl_p; + -- -- Test case for bug #4290: bogus calculation of subplan param sets -- @@ -410,6 +481,15 @@ select (select view_a) from view_a; select (select (select view_a)) from view_a; select (select (a.*)::text) from view_a a; +-- +-- Test case for bug #19037: no relation entry for relid N +-- + +explain (costs off) +select (1 = any(array_agg(f1))) = any (select false) from int4_tbl; + +select (1 = any(array_agg(f1))) = any (select false) from int4_tbl; + -- -- Check that whole-row Vars reading the result of a subselect don't include -- any junk columns therein @@ -918,6 +998,23 @@ fetch backward all in c1; commit; +-- +-- Check that JsonConstructorExpr is treated as non-strict, and thus can be +-- wrapped in a PlaceHolderVar +-- + +begin; + +create temp table json_tab (a int); +insert into json_tab values (1); + +explain (verbose, costs off) +select * from json_tab t1 left join (select json_array(1, a) from json_tab t2) s on false; + +select * from json_tab t1 left join (select json_array(1, a) from json_tab t2) s on false; + +rollback; + -- -- Verify that we correctly flatten cases involving a subquery output -- expression that doesn't need to be wrapped in a PlaceHolderVar @@ -1039,7 +1136,7 @@ explain (verbose, costs off) select ss2.* from int8_tbl t1 left join (int8_tbl t2 left join - (select coalesce(q1) as x, * from int8_tbl t3) ss1 on t2.q1 = ss1.q2 inner join + (select coalesce(q1, q1) as x, * from int8_tbl t3) ss1 on t2.q1 = ss1.q2 inner join lateral (select ss1.x as y, * from int8_tbl t4) ss2 on t2.q2 = ss2.q1) on t1.q2 = ss2.q1 order by 1, 2, 3; @@ -1047,7 +1144,7 @@ order by 1, 2, 3; select ss2.* from int8_tbl t1 left join (int8_tbl t2 left join - (select coalesce(q1) as x, * from int8_tbl t3) ss1 on t2.q1 = ss1.q2 inner join + (select coalesce(q1, q1) as x, * from int8_tbl t3) ss1 on t2.q1 = ss1.q2 inner join lateral (select ss1.x as y, * from int8_tbl t4) ss2 on t2.q2 = ss2.q1) on t1.q2 = ss2.q1 order by 1, 2, 3; @@ -1057,7 +1154,7 @@ explain (verbose, costs off) select ss2.* from int8_tbl t1 left join (int8_tbl t2 left join - (select coalesce(q1) as x, * from int8_tbl t3) ss1 on t2.q1 = ss1.q2 left join + (select coalesce(q1, q1) as x, * from int8_tbl t3) ss1 on t2.q1 = ss1.q2 left join lateral (select ss1.x as y, * from int8_tbl t4) ss2 on t2.q2 = ss2.q1) on t1.q2 = ss2.q1 order by 1, 2, 3; @@ -1065,7 +1162,7 @@ order by 1, 2, 3; select ss2.* from int8_tbl t1 left join (int8_tbl t2 left join - (select coalesce(q1) as x, * from int8_tbl t3) ss1 on t2.q1 = ss1.q2 left join + (select coalesce(q1, q1) as x, * from int8_tbl t3) ss1 on t2.q1 = ss1.q2 left join lateral (select ss1.x as y, * from int8_tbl t4) ss2 on t2.q2 = ss2.q1) on t1.q2 = ss2.q1 order by 1, 2, 3; diff --git a/crates/squawk_parser/tests/data/regression_suite/sysviews.sql b/crates/squawk_parser/tests/data/regression_suite/sysviews.sql index d0917b68..66179f02 100644 --- a/crates/squawk_parser/tests/data/regression_suite/sysviews.sql +++ b/crates/squawk_parser/tests/data/regression_suite/sysviews.sql @@ -101,21 +101,3 @@ select count(distinct utc_offset) >= 24 as ok from pg_timezone_abbrevs; -- One specific case we can check without much fear of breakage -- is the historical local-mean-time value used for America/Los_Angeles. 
select * from pg_timezone_abbrevs where abbrev = 'LMT'; - -DO $$ -DECLARE - bg_writer_pid int; - r RECORD; -BEGIN - SELECT pid from pg_stat_activity where backend_type='background writer' - INTO bg_writer_pid; - - select type, name, ident - from pg_get_process_memory_contexts(bg_writer_pid, false, 20) - where path = '{1}' into r; - RAISE NOTICE '%', r; - select type, name, ident - from pg_get_process_memory_contexts(pg_backend_pid(), false, 20) - where path = '{1}' into r; - RAISE NOTICE '%', r; -END $$; diff --git a/crates/squawk_parser/tests/data/regression_suite/tablesample.sql b/crates/squawk_parser/tests/data/regression_suite/tablesample.sql index b9baba28..0dcb014e 100644 --- a/crates/squawk_parser/tests/data/regression_suite/tablesample.sql +++ b/crates/squawk_parser/tests/data/regression_suite/tablesample.sql @@ -19,6 +19,8 @@ CREATE VIEW test_tablesample_v1 AS SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (10*2) REPEATABLE (2); CREATE VIEW test_tablesample_v2 AS SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (99); +-- \d+ test_tablesample_v1 +-- \d+ test_tablesample_v2 -- check a sampled query doesn't affect cursor in progress BEGIN; diff --git a/crates/squawk_parser/tests/data/regression_suite/tablespace.sql b/crates/squawk_parser/tests/data/regression_suite/tablespace.sql index 252553f9..4db1b3b7 100644 --- a/crates/squawk_parser/tests/data/regression_suite/tablespace.sql +++ b/crates/squawk_parser/tests/data/regression_suite/tablespace.sql @@ -71,14 +71,14 @@ SELECT c.relname FROM pg_class c, pg_tablespace s -- relfilenode. -- Save first the existing relfilenode for the toast and main relations. SELECT relfilenode as main_filenode FROM pg_class - WHERE relname = 'regress_tblspace_test_tbl_idx' ; + WHERE relname = 'regress_tblspace_test_tbl_idx' /* \gset */; SELECT relfilenode as toast_filenode FROM pg_class WHERE oid = (SELECT i.indexrelid FROM pg_class c, pg_index i WHERE i.indrelid = c.reltoastrelid AND - c.relname = 'regress_tblspace_test_tbl') ; + c.relname = 'regress_tblspace_test_tbl') /* \gset */; REINDEX (TABLESPACE regress_tblspace) TABLE regress_tblspace_test_tbl; SELECT c.relname FROM pg_class c, pg_tablespace s WHERE c.reltablespace = s.oid AND s.spcname = 'regress_tblspace' @@ -180,6 +180,8 @@ SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c where c.reltablespace = t.oid AND c.relname = 'foo_idx'; -- check \d output +-- \d testschema.foo +-- \d testschema.foo_idx -- -- partitioned table @@ -222,6 +224,12 @@ CREATE INDEX part_a_idx ON testschema.part (a) TABLESPACE regress_tblspace; CREATE TABLE testschema.part2 PARTITION OF testschema.part FOR VALUES IN (2); SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c where c.reltablespace = t.oid AND c.relname LIKE 'part%_idx' ORDER BY relname; +-- \d testschema.part +-- \d+ testschema.part +-- \d testschema.part1 +-- \d+ testschema.part1 +-- \d testschema.part_a_idx +-- \d+ testschema.part_a_idx -- partitioned rels cannot specify the default tablespace. 
These fail: CREATE TABLE testschema.dflt (a int PRIMARY KEY) PARTITION BY LIST (a) TABLESPACE pg_default; @@ -243,20 +251,40 @@ CREATE INDEX test_index2 on testschema.test_default_tab (id) TABLESPACE regress_ ALTER TABLE testschema.test_default_tab ADD CONSTRAINT test_index3 PRIMARY KEY (id); ALTER TABLE testschema.test_default_tab ADD CONSTRAINT test_index4 UNIQUE (id) USING INDEX TABLESPACE regress_tblspace; +-- \d testschema.test_index1 +-- \d testschema.test_index2 +-- \d testschema.test_index3 +-- \d testschema.test_index4 -- use a custom tablespace for default_tablespace SET default_tablespace TO regress_tblspace; -- tablespace should not change if no rewrite ALTER TABLE testschema.test_default_tab ALTER id TYPE bigint; +-- \d testschema.test_index1 +-- \d testschema.test_index2 +-- \d testschema.test_index3 +-- \d testschema.test_index4 SELECT * FROM testschema.test_default_tab; -- tablespace should not change even if there is an index rewrite ALTER TABLE testschema.test_default_tab ALTER id TYPE int; +-- \d testschema.test_index1 +-- \d testschema.test_index2 +-- \d testschema.test_index3 +-- \d testschema.test_index4 SELECT * FROM testschema.test_default_tab; -- now use the default tablespace for default_tablespace SET default_tablespace TO ''; -- tablespace should not change if no rewrite ALTER TABLE testschema.test_default_tab ALTER id TYPE int; +-- \d testschema.test_index1 +-- \d testschema.test_index2 +-- \d testschema.test_index3 +-- \d testschema.test_index4 -- tablespace should not change even if there is an index rewrite ALTER TABLE testschema.test_default_tab ALTER id TYPE bigint; +-- \d testschema.test_index1 +-- \d testschema.test_index2 +-- \d testschema.test_index3 +-- \d testschema.test_index4 DROP TABLE testschema.test_default_tab; -- check that default_tablespace doesn't affect ALTER TABLE index rebuilds @@ -271,20 +299,40 @@ CREATE INDEX test_index2 on testschema.test_default_tab_p (val) TABLESPACE regre ALTER TABLE testschema.test_default_tab_p ADD CONSTRAINT test_index3 PRIMARY KEY (id); ALTER TABLE testschema.test_default_tab_p ADD CONSTRAINT test_index4 UNIQUE (id) USING INDEX TABLESPACE regress_tblspace; +-- \d testschema.test_index1 +-- \d testschema.test_index2 +-- \d testschema.test_index3 +-- \d testschema.test_index4 -- use a custom tablespace for default_tablespace SET default_tablespace TO regress_tblspace; -- tablespace should not change if no rewrite ALTER TABLE testschema.test_default_tab_p ALTER val TYPE bigint; +-- \d testschema.test_index1 +-- \d testschema.test_index2 +-- \d testschema.test_index3 +-- \d testschema.test_index4 SELECT * FROM testschema.test_default_tab_p; -- tablespace should not change even if there is an index rewrite ALTER TABLE testschema.test_default_tab_p ALTER val TYPE int; +-- \d testschema.test_index1 +-- \d testschema.test_index2 +-- \d testschema.test_index3 +-- \d testschema.test_index4 SELECT * FROM testschema.test_default_tab_p; -- now use the default tablespace for default_tablespace SET default_tablespace TO ''; -- tablespace should not change if no rewrite ALTER TABLE testschema.test_default_tab_p ALTER val TYPE int; +-- \d testschema.test_index1 +-- \d testschema.test_index2 +-- \d testschema.test_index3 +-- \d testschema.test_index4 -- tablespace should not change even if there is an index rewrite ALTER TABLE testschema.test_default_tab_p ALTER val TYPE bigint; +-- \d testschema.test_index1 +-- \d testschema.test_index2 +-- \d testschema.test_index3 +-- \d testschema.test_index4 DROP TABLE 
testschema.test_default_tab_p; -- check that default_tablespace affects index additions in ALTER TABLE @@ -294,6 +342,8 @@ SET default_tablespace TO regress_tblspace; ALTER TABLE testschema.test_tab ADD CONSTRAINT test_tab_unique UNIQUE (id); SET default_tablespace TO ''; ALTER TABLE testschema.test_tab ADD CONSTRAINT test_tab_pkey PRIMARY KEY (id); +-- \d testschema.test_tab_unique +-- \d testschema.test_tab_pkey SELECT * FROM testschema.test_tab; DROP TABLE testschema.test_tab; @@ -305,7 +355,13 @@ ALTER TABLE testschema.test_tab ADD CONSTRAINT test_tab_unique UNIQUE (a); CREATE INDEX test_tab_a_idx ON testschema.test_tab (a); SET default_tablespace TO ''; CREATE INDEX test_tab_b_idx ON testschema.test_tab (b); +-- \d testschema.test_tab_unique +-- \d testschema.test_tab_a_idx +-- \d testschema.test_tab_b_idx ALTER TABLE testschema.test_tab ALTER b TYPE bigint, ADD UNIQUE (c); +-- \d testschema.test_tab_unique +-- \d testschema.test_tab_a_idx +-- \d testschema.test_tab_b_idx DROP TABLE testschema.test_tab; -- let's try moving a table from one place to another @@ -341,6 +397,7 @@ ALTER INDEX testschema.part_a_idx SET TABLESPACE pg_default; DROP TABLESPACE regress_tblspace; -- Adequate cache initialization before GRANT +-- \c - BEGIN; GRANT ALL ON TABLESPACE regress_tblspace TO PUBLIC; ROLLBACK; diff --git a/crates/squawk_parser/tests/data/regression_suite/temp.sql b/crates/squawk_parser/tests/data/regression_suite/temp.sql index 4020d396..61529670 100644 --- a/crates/squawk_parser/tests/data/regression_suite/temp.sql +++ b/crates/squawk_parser/tests/data/regression_suite/temp.sql @@ -47,6 +47,7 @@ DROP TABLE temptest; CREATE TEMP TABLE temptest(col int); +-- \c SELECT * FROM temptest; @@ -305,6 +306,7 @@ prepare transaction 'twophase_tab'; -- Corner case: current_schema may create a temporary schema if namespace -- creation is pending, so check after that. First reset the connection -- to remove the temporary namespace. +-- \c - SET search_path TO 'pg_temp'; BEGIN; SELECT current_schema() ~ 'pg_temp' AS is_temp_schema; @@ -315,6 +317,7 @@ PREPARE TRANSACTION 'twophase_search'; -- related matters. -- use lower possible buffer limit to make the test cheaper +-- \c SET temp_buffers = 100; CREATE TEMPORARY TABLE test_temp(a int not null unique, b TEXT not null, cnt int not null); @@ -323,6 +326,7 @@ INSERT INTO test_temp SELECT generate_series(1, 10000) as id, repeat('a', 200), SELECT pg_relation_size('test_temp') / current_setting('block_size')::int8 > 200; -- Don't want cursor names and plpgsql function lines in the error messages +-- \set VERBOSITY terse /* helper function to create cursors for each page in [p_start, p_end] */ CREATE FUNCTION test_temp_pin(p_start int, p_end int) diff --git a/crates/squawk_parser/tests/data/regression_suite/test_setup.sql b/crates/squawk_parser/tests/data/regression_suite/test_setup.sql index fc502d8d..7daccc8d 100644 --- a/crates/squawk_parser/tests/data/regression_suite/test_setup.sql +++ b/crates/squawk_parser/tests/data/regression_suite/test_setup.sql @@ -3,7 +3,11 @@ -- -- directory paths and dlsuffix are passed to us in environment variables +-- \getenv abs_srcdir PG_ABS_SRCDIR +-- \getenv libdir PG_LIBDIR +-- \getenv dlsuffix PG_DLSUFFIX +-- \set regresslib :libdir '/regress' :dlsuffix -- -- synchronous_commit=off delays when hint bits may be set. 
Some plans change @@ -130,6 +134,7 @@ CREATE TABLE onek ( string4 name ); +-- \set filename :abs_srcdir '/data/onek.data' COPY onek FROM 'filename'; VACUUM ANALYZE onek; @@ -155,6 +160,7 @@ CREATE TABLE tenk1 ( string4 name ); +-- \set filename :abs_srcdir '/data/tenk.data' COPY tenk1 FROM 'filename'; VACUUM ANALYZE tenk1; @@ -167,6 +173,7 @@ CREATE TABLE person ( location point ); +-- \set filename :abs_srcdir '/data/person.data' COPY person FROM 'filename'; VACUUM ANALYZE person; @@ -175,6 +182,7 @@ CREATE TABLE emp ( manager name ) INHERITS (person); +-- \set filename :abs_srcdir '/data/emp.data' COPY emp FROM 'filename'; VACUUM ANALYZE emp; @@ -182,6 +190,7 @@ CREATE TABLE student ( gpa float8 ) INHERITS (person); +-- \set filename :abs_srcdir '/data/student.data' COPY student FROM 'filename'; VACUUM ANALYZE student; @@ -189,6 +198,7 @@ CREATE TABLE stud_emp ( percent int4 ) INHERITS (emp, student); +-- \set filename :abs_srcdir '/data/stud_emp.data' COPY stud_emp FROM 'filename'; VACUUM ANALYZE stud_emp; @@ -197,6 +207,7 @@ CREATE TABLE road ( thepath path ); +-- \set filename :abs_srcdir '/data/streets.data' COPY road FROM 'filename'; VACUUM ANALYZE road; diff --git a/crates/squawk_parser/tests/data/regression_suite/tidrangescan.sql b/crates/squawk_parser/tests/data/regression_suite/tidrangescan.sql index ac09ebb6..1ac3995e 100644 --- a/crates/squawk_parser/tests/data/regression_suite/tidrangescan.sql +++ b/crates/squawk_parser/tests/data/regression_suite/tidrangescan.sql @@ -98,4 +98,48 @@ COMMIT; DROP TABLE tidrangescan; +-- Tests for parallel TID Range Scans +BEGIN; + +SET LOCAL parallel_setup_cost TO 0; +SET LOCAL parallel_tuple_cost TO 0; +SET LOCAL min_parallel_table_scan_size TO 0; +SET LOCAL max_parallel_workers_per_gather TO 4; + +CREATE TABLE parallel_tidrangescan (id integer, data text) +WITH (fillfactor = 10); + +-- Insert enough tuples such that each page gets 5 tuples with fillfactor = 10 +INSERT INTO parallel_tidrangescan +SELECT i, repeat('x', 100) FROM generate_series(1,200) AS s(i); + +-- Ensure there are 40 pages for parallel test +SELECT min(ctid), max(ctid) FROM parallel_tidrangescan; + +-- Parallel range scans with upper bound +EXPLAIN (COSTS OFF) +SELECT count(*) FROM parallel_tidrangescan WHERE ctid < '(30,1)'; +SELECT count(*) FROM parallel_tidrangescan WHERE ctid < '(30,1)'; + +-- Parallel range scans with lower bound +EXPLAIN (COSTS OFF) +SELECT count(*) FROM parallel_tidrangescan WHERE ctid > '(10,0)'; +SELECT count(*) FROM parallel_tidrangescan WHERE ctid > '(10,0)'; + +-- Parallel range scans with both bounds +EXPLAIN (COSTS OFF) +SELECT count(*) FROM parallel_tidrangescan WHERE ctid > '(10,0)' AND ctid < '(30,1)'; +SELECT count(*) FROM parallel_tidrangescan WHERE ctid > '(10,0)' AND ctid < '(30,1)'; + +-- Parallel rescans +EXPLAIN (COSTS OFF) +SELECT t.ctid,t2.c FROM parallel_tidrangescan t, +LATERAL (SELECT count(*) c FROM parallel_tidrangescan t2 WHERE t2.ctid <= t.ctid) t2 +WHERE t.ctid < '(1,0)'; + +SELECT t.ctid,t2.c FROM parallel_tidrangescan t, +LATERAL (SELECT count(*) c FROM parallel_tidrangescan t2 WHERE t2.ctid <= t.ctid) t2 +WHERE t.ctid < '(1,0)'; + +ROLLBACK; RESET enable_seqscan; diff --git a/crates/squawk_parser/tests/data/regression_suite/timestamp.sql b/crates/squawk_parser/tests/data/regression_suite/timestamp.sql index 55f80530..313757ed 100644 --- a/crates/squawk_parser/tests/data/regression_suite/timestamp.sql +++ b/crates/squawk_parser/tests/data/regression_suite/timestamp.sql @@ -175,7 +175,9 @@ SELECT d1 - timestamp without time 
zone '1997-01-02' AS diff FROM TIMESTAMP_TBL WHERE d1 BETWEEN '1902-01-01' AND '2038-01-01'; SELECT date_trunc( 'week', timestamp '2004-02-29 15:44:17.71393' ) AS week_trunc; - +SELECT date_trunc( 'week', timestamp 'infinity' ) AS inf_trunc; +SELECT date_trunc( 'timezone', timestamp '2004-02-29 15:44:17.71393' ) AS notsupp_trunc; +SELECT date_trunc( 'timezone', timestamp 'infinity' ) AS notsupp_inf_trunc; SELECT date_trunc( 'ago', timestamp 'infinity' ) AS invalid_trunc; -- verify date_bin behaves the same as date_trunc for relevant intervals diff --git a/crates/squawk_parser/tests/data/regression_suite/timestamptz.sql b/crates/squawk_parser/tests/data/regression_suite/timestamptz.sql index 91555235..643efe8b 100644 --- a/crates/squawk_parser/tests/data/regression_suite/timestamptz.sql +++ b/crates/squawk_parser/tests/data/regression_suite/timestamptz.sql @@ -217,15 +217,18 @@ SELECT d1 - timestamp with time zone '1997-01-02' AS diff FROM TIMESTAMPTZ_TBL WHERE d1 BETWEEN '1902-01-01' AND '2038-01-01'; SELECT date_trunc( 'week', timestamp with time zone '2004-02-29 15:44:17.71393' ) AS week_trunc; +SELECT date_trunc( 'week', timestamp with time zone 'infinity' ) AS inf_trunc; +SELECT date_trunc( 'timezone', timestamp with time zone '2004-02-29 15:44:17.71393' ) AS notsupp_trunc; +SELECT date_trunc( 'timezone', timestamp with time zone 'infinity' ) AS notsupp_inf_trunc; SELECT date_trunc( 'ago', timestamp with time zone 'infinity' ) AS invalid_trunc; SELECT date_trunc('day', timestamp with time zone '2001-02-16 20:38:40+00', 'Australia/Sydney') as sydney_trunc; -- zone name SELECT date_trunc('day', timestamp with time zone '2001-02-16 20:38:40+00', 'GMT') as gmt_trunc; -- fixed-offset abbreviation SELECT date_trunc('day', timestamp with time zone '2001-02-16 20:38:40+00', 'VET') as vet_trunc; -- variable-offset abbreviation +SELECT date_trunc('timezone', timestamp with time zone 'infinity', 'GMT') AS notsupp_zone_trunc; +SELECT date_trunc( 'week', timestamp with time zone 'infinity', 'GMT') AS inf_zone_trunc; SELECT date_trunc('ago', timestamp with time zone 'infinity', 'GMT') AS invalid_zone_trunc; - - -- verify date_bin behaves the same as date_trunc for relevant intervals SELECT str, @@ -663,7 +666,9 @@ CREATE VIEW timestamp_local_view AS TIMESTAMP '1978-07-07 19:38' AT LOCAL AS t_at_local, timezone(TIMESTAMP '1978-07-07 19:38') AS t_func; SELECT pg_get_viewdef('timestamp_local_view', true); +-- \x TABLE timestamp_local_view; +-- \x DROP VIEW timestamp_local_view; COMMIT; diff --git a/crates/squawk_parser/tests/data/regression_suite/transactions.sql b/crates/squawk_parser/tests/data/regression_suite/transactions.sql index 0568a355..0f1e172b 100644 --- a/crates/squawk_parser/tests/data/regression_suite/transactions.sql +++ b/crates/squawk_parser/tests/data/regression_suite/transactions.sql @@ -528,101 +528,101 @@ RESET default_transaction_read_only; DROP TABLE trans_abc; --- -- Test assorted behaviors around the implicit transaction block created --- -- when multiple SQL commands are sent in a single Query message. These --- -- tests rely on the fact that psql will not break SQL commands apart at a --- -- backslash-quoted semicolon, but will send them as one Query. +-- Test assorted behaviors around the implicit transaction block created +-- when multiple SQL commands are sent in a single Query message. These +-- tests rely on the fact that psql will not break SQL commands apart at a +-- backslash-quoted semicolon, but will send them as one Query. 
--- create temp table i_table (f1 int); +create temp table i_table (f1 int); --- -- psql will show all results of a multi-statement Query +-- psql will show all results of a multi-statement Query -- SELECT 1\; SELECT 2\; SELECT 3; --- -- this implicitly commits: +-- this implicitly commits: -- insert into i_table values(1)\; select * from i_table; --- -- 1/0 error will cause rolling back the whole implicit transaction +-- 1/0 error will cause rolling back the whole implicit transaction -- insert into i_table values(2)\; select * from i_table\; select 1/0; --- select * from i_table; +select * from i_table; --- rollback; -- we are not in a transaction at this point +rollback; -- we are not in a transaction at this point --- -- can use regular begin/commit/rollback within a single Query +-- can use regular begin/commit/rollback within a single Query -- begin\; insert into i_table values(3)\; commit; --- rollback; -- we are not in a transaction at this point +rollback; -- we are not in a transaction at this point -- begin\; insert into i_table values(4)\; rollback; --- rollback; -- we are not in a transaction at this point +rollback; -- we are not in a transaction at this point --- -- begin converts implicit transaction into a regular one that --- -- can extend past the end of the Query +-- begin converts implicit transaction into a regular one that +-- can extend past the end of the Query -- select 1\; begin\; insert into i_table values(5); --- commit; +commit; -- select 1\; begin\; insert into i_table values(6); --- rollback; +rollback; --- -- commit in implicit-transaction state commits but issues a warning. +-- commit in implicit-transaction state commits but issues a warning. -- insert into i_table values(7)\; commit\; insert into i_table values(8)\; select 1/0; --- -- similarly, rollback aborts but issues a warning. +-- similarly, rollback aborts but issues a warning. -- insert into i_table values(9)\; rollback\; select 2; --- select * from i_table; +select * from i_table; --- rollback; -- we are not in a transaction at this point +rollback; -- we are not in a transaction at this point --- -- implicit transaction block is still a transaction block, for e.g. VACUUM +-- implicit transaction block is still a transaction block, for e.g. 
VACUUM -- SELECT 1\; VACUUM; -- SELECT 1\; COMMIT\; VACUUM; --- -- we disallow savepoint-related commands in implicit-transaction state +-- we disallow savepoint-related commands in implicit-transaction state -- SELECT 1\; SAVEPOINT sp; -- SELECT 1\; COMMIT\; SAVEPOINT sp; -- ROLLBACK TO SAVEPOINT sp\; SELECT 2; -- SELECT 2\; RELEASE SAVEPOINT sp\; SELECT 3; --- -- but this is OK, because the BEGIN converts it to a regular xact +-- but this is OK, because the BEGIN converts it to a regular xact -- SELECT 1\; BEGIN\; SAVEPOINT sp\; ROLLBACK TO SAVEPOINT sp\; COMMIT; --- -- Tests for AND CHAIN in implicit transaction blocks +-- Tests for AND CHAIN in implicit transaction blocks -- SET TRANSACTION READ ONLY\; COMMIT AND CHAIN; -- error --- SHOW transaction_read_only; +SHOW transaction_read_only; -- SET TRANSACTION READ ONLY\; ROLLBACK AND CHAIN; -- error --- SHOW transaction_read_only; +SHOW transaction_read_only; --- CREATE TABLE trans_abc (a int); +CREATE TABLE trans_abc (a int); --- -- COMMIT/ROLLBACK + COMMIT/ROLLBACK AND CHAIN +-- COMMIT/ROLLBACK + COMMIT/ROLLBACK AND CHAIN -- INSERT INTO trans_abc VALUES (7)\; COMMIT\; INSERT INTO trans_abc VALUES (8)\; COMMIT AND CHAIN; -- 7 commit, 8 error -- INSERT INTO trans_abc VALUES (9)\; ROLLBACK\; INSERT INTO trans_abc VALUES (10)\; ROLLBACK AND CHAIN; -- 9 rollback, 10 error --- -- COMMIT/ROLLBACK AND CHAIN + COMMIT/ROLLBACK +-- COMMIT/ROLLBACK AND CHAIN + COMMIT/ROLLBACK -- INSERT INTO trans_abc VALUES (11)\; COMMIT AND CHAIN\; INSERT INTO trans_abc VALUES (12)\; COMMIT; -- 11 error, 12 not reached -- INSERT INTO trans_abc VALUES (13)\; ROLLBACK AND CHAIN\; INSERT INTO trans_abc VALUES (14)\; ROLLBACK; -- 13 error, 14 not reached --- -- START TRANSACTION + COMMIT/ROLLBACK AND CHAIN +-- START TRANSACTION + COMMIT/ROLLBACK AND CHAIN -- START TRANSACTION ISOLATION LEVEL REPEATABLE READ\; INSERT INTO trans_abc VALUES (15)\; COMMIT AND CHAIN; -- 15 ok --- SHOW transaction_isolation; -- transaction is active at this point --- COMMIT; +SHOW transaction_isolation; -- transaction is active at this point +COMMIT; -- START TRANSACTION ISOLATION LEVEL REPEATABLE READ\; INSERT INTO trans_abc VALUES (16)\; ROLLBACK AND CHAIN; -- 16 ok --- SHOW transaction_isolation; -- transaction is active at this point --- ROLLBACK; +SHOW transaction_isolation; -- transaction is active at this point +ROLLBACK; --- SET default_transaction_isolation = 'read committed'; +SET default_transaction_isolation = 'read committed'; --- -- START TRANSACTION + COMMIT/ROLLBACK + COMMIT/ROLLBACK AND CHAIN +-- START TRANSACTION + COMMIT/ROLLBACK + COMMIT/ROLLBACK AND CHAIN -- START TRANSACTION ISOLATION LEVEL REPEATABLE READ\; INSERT INTO trans_abc VALUES (17)\; COMMIT\; INSERT INTO trans_abc VALUES (18)\; COMMIT AND CHAIN; -- 17 commit, 18 error --- SHOW transaction_isolation; -- out of transaction block +SHOW transaction_isolation; -- out of transaction block -- START TRANSACTION ISOLATION LEVEL REPEATABLE READ\; INSERT INTO trans_abc VALUES (19)\; ROLLBACK\; INSERT INTO trans_abc VALUES (20)\; ROLLBACK AND CHAIN; -- 19 rollback, 20 error --- SHOW transaction_isolation; -- out of transaction block +SHOW transaction_isolation; -- out of transaction block --- RESET default_transaction_isolation; +RESET default_transaction_isolation; --- SELECT * FROM trans_abc ORDER BY 1; +SELECT * FROM trans_abc ORDER BY 1; --- DROP TABLE trans_abc; +DROP TABLE trans_abc; -- TRANSACTION SNAPSHOT -- Incorrect identifier. 
diff --git a/crates/squawk_parser/tests/data/regression_suite/triggers.sql b/crates/squawk_parser/tests/data/regression_suite/triggers.sql index 8309b649..165356db 100644 --- a/crates/squawk_parser/tests/data/regression_suite/triggers.sql +++ b/crates/squawk_parser/tests/data/regression_suite/triggers.sql @@ -3,7 +3,10 @@ -- -- directory paths and dlsuffix are passed to us in environment variables +-- \getenv libdir PG_LIBDIR +-- \getenv dlsuffix PG_DLSUFFIX +-- \set regresslib :libdir '/regress' :dlsuffix CREATE FUNCTION trigger_return_old () RETURNS trigger @@ -97,6 +100,13 @@ CREATE TABLE log_table (tstamp timestamp default timeofday()::timestamp); CREATE TABLE main_table (a int unique, b int); +COPY main_table (a,b) FROM stdin; +-- 5 10 +-- 20 20 +-- 30 10 +-- 50 35 +-- 80 15 +-- \. CREATE FUNCTION trigger_func() RETURNS trigger LANGUAGE plpgsql AS ' BEGIN @@ -136,6 +146,10 @@ UPDATE main_table SET a = a + 2 WHERE b > 100; ALTER TABLE main_table DROP CONSTRAINT main_table_a_key; -- COPY should fire per-row and per-statement INSERT triggers +COPY main_table (a, b) FROM stdin; +-- 30 40 +-- 50 60 +-- \. SELECT * FROM main_table ORDER BY a, b; @@ -167,6 +181,10 @@ SELECT trigger_name, event_manipulation, event_object_schema, event_object_table WHERE event_object_table IN ('main_table') ORDER BY trigger_name COLLATE "C", 2; INSERT INTO main_table (a) VALUES (123), (456); +COPY main_table FROM stdin; +-- 123 999 +-- 456 999 +-- \. DELETE FROM main_table WHERE a IN (123, 456); UPDATE main_table SET a = 50, b = 60; SELECT * FROM main_table ORDER BY a, b; @@ -242,7 +260,7 @@ UPDATE some_t SET some_col = FALSE; UPDATE some_t SET some_col = TRUE; DROP TABLE some_t; --- bogus cases +-- -- bogus cases -- CREATE TRIGGER error_upd_and_col BEFORE UPDATE OR UPDATE OF a ON main_table -- FOR EACH ROW EXECUTE PROCEDURE trigger_func('error_upd_and_col'); -- CREATE TRIGGER error_upd_a_a BEFORE UPDATE OF a, a ON main_table @@ -483,6 +501,7 @@ CREATE TRIGGER z_min_update BEFORE UPDATE ON min_updates_test FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger(); +-- \set QUIET false UPDATE min_updates_test SET f1 = f1; @@ -490,6 +509,7 @@ UPDATE min_updates_test SET f2 = f2 + 1; UPDATE min_updates_test SET f3 = 2 WHERE f3 is null; +-- \set QUIET true SELECT * FROM min_updates_test; @@ -621,6 +641,7 @@ FOR EACH STATEMENT EXECUTE PROCEDURE view_trigger('after_view_upd_stmt'); CREATE TRIGGER after_del_stmt_trig AFTER DELETE ON main_view FOR EACH STATEMENT EXECUTE PROCEDURE view_trigger('after_view_del_stmt'); +-- \set QUIET false -- Insert into view using trigger INSERT INTO main_view VALUES (20, 30); @@ -642,12 +663,15 @@ UPDATE main_view SET b = 0 WHERE false; DELETE FROM main_view WHERE a IN (20,21); DELETE FROM main_view WHERE a = 31 RETURNING a, b; +-- \set QUIET true -- Describe view should list triggers +-- \d main_view -- Test dropping view triggers DROP TRIGGER instead_of_insert_trig ON main_view; DROP TRIGGER instead_of_delete_trig ON main_view; +-- \d+ main_view DROP VIEW main_view; -- @@ -748,6 +772,7 @@ $$; CREATE TRIGGER city_update_trig INSTEAD OF UPDATE ON city_view FOR EACH ROW EXECUTE PROCEDURE city_update(); +-- \set QUIET false -- INSERT .. RETURNING INSERT INTO city_view(city_name) VALUES('Tokyo') RETURNING *; @@ -771,6 +796,7 @@ UPDATE city_view v1 SET country_name = v2.country_name FROM city_view v2 -- DELETE .. 
RETURNING DELETE FROM city_view WHERE city_name = 'Birmingham' RETURNING *; +-- \set QUIET true -- read-only view with WHERE clause CREATE VIEW european_city_view AS @@ -783,11 +809,13 @@ AS 'begin RETURN NULL; end'; CREATE TRIGGER no_op_trig INSTEAD OF INSERT OR UPDATE OR DELETE ON european_city_view FOR EACH ROW EXECUTE PROCEDURE no_op_trig_fn(); +-- \set QUIET false INSERT INTO european_city_view VALUES (0, 'x', 10000, 'y', 'z'); UPDATE european_city_view SET population = 10000; DELETE FROM european_city_view; +-- \set QUIET true -- rules bypassing no-op triggers CREATE RULE european_city_insert_rule AS ON INSERT TO european_city_view @@ -806,6 +834,7 @@ RETURNING NEW.*; CREATE RULE european_city_delete_rule AS ON DELETE TO european_city_view DO INSTEAD DELETE FROM city_view WHERE city_id = OLD.city_id RETURNING *; +-- \set QUIET false -- INSERT not limited by view's WHERE clause, but UPDATE AND DELETE are INSERT INTO european_city_view(city_name, country_name) @@ -829,6 +858,7 @@ UPDATE city_view v SET population = 599657 RETURNING co.country_id, v.country_name, v.city_id, v.city_name, v.population; +-- \set QUIET true SELECT * FROM city_view; @@ -1228,6 +1258,7 @@ select tgrelid::regclass, tgname, tgfoid::regproc from pg_trigger -- check detach behavior create trigger trg1 after insert on trigpart for each row execute procedure trigger_nothing(); +-- \d trigpart3 alter table trigpart detach partition trigpart3; drop trigger trg1 on trigpart3; -- fail due to "does not exist" alter table trigpart detach partition trigpart4; @@ -1242,12 +1273,14 @@ select tgrelid::regclass::text, tgname, tgfoid::regproc, tgenabled, tgisinternal where tgname ~ '^trg1' order by 1; create table trigpart3 (like trigpart); create trigger trg1 after insert on trigpart3 for each row execute procedure trigger_nothing(); +-- \d trigpart3 alter table trigpart attach partition trigpart3 FOR VALUES FROM (2000) to (3000); -- fail drop table trigpart3; -- check display of unrelated triggers create trigger samename after delete on trigpart execute function trigger_nothing(); create trigger samename after delete on trigpart1 execute function trigger_nothing(); +-- \d trigpart1 drop table trigpart; drop function trigger_nothing(); @@ -1337,10 +1370,12 @@ delete from parted_stmt_trig; copy parted_stmt_trig(a) from stdin; -- 1 -- 2 +-- \. -- insert via copy on the first partition copy parted_stmt_trig1(a) from stdin; -- 1 +-- \. 
-- Disabling a trigger in the parent table should disable children triggers too alter table parted_stmt_trig disable trigger trig_ins_after_parent; @@ -1541,6 +1576,19 @@ select * from parted; drop table parted; drop function parted_trigfunc(); +-- +-- Constraint triggers +-- +create constraint trigger crtr + after insert on foo not valid + for each row execute procedure foo (); +create constraint trigger crtr + after insert on foo no inherit + for each row execute procedure foo (); +create constraint trigger crtr + after insert on foo not enforced + for each row execute procedure foo (); + -- -- Constraint triggers and partitioned tables create table parted_constr_ancestor (a int, b text) @@ -1556,7 +1604,7 @@ create constraint trigger parted_trig after insert on parted_constr_ancestor deferrable for each row execute procedure trigger_notice_ab(); create constraint trigger parted_trig_two after insert on parted_constr - deferrable initially deferred + deferrable initially deferred enforced for each row when (bark(new.b) AND new.a % 2 = 1) execute procedure trigger_notice_ab(); @@ -1885,6 +1933,12 @@ copy parent (a, b) from stdin; -- AAA 42 -- BBB 42 -- CCC 42 +-- \. + +-- check detach/reattach behavior; statement triggers with transition tables +-- should not prevent a table from becoming a partition again +alter table parent detach partition child1; +alter table parent attach partition child1 for values in ('AAA'); -- DML affecting parent sees tuples collected from children even if -- there is no transition table trigger on the children @@ -1905,6 +1959,7 @@ copy parent (a, b) from stdin; -- AAA 42 -- BBB 42 -- CCC 42 +-- \. -- insert into parent with a before trigger on a child tuple before -- insertion, and we capture the newly modified row in parent format @@ -1929,6 +1984,7 @@ copy parent (a, b) from stdin; -- AAA 42 -- BBB 42 -- CCC 234 +-- \. drop table child1, child2, child3, parent; drop function intercept_insert(); @@ -2094,12 +2150,19 @@ copy parent (a, b) from stdin; -- AAA 42 -- BBB 42 -- CCC 42 +-- \. -- same behavior for copy if there is an index (interesting because rows are -- captured by a different code path in copyfrom.c if there are indexes) create index on parent(b); copy parent (a, b) from stdin; -- DDD 42 +-- \. 
+ +-- check disinherit/reinherit behavior; statement triggers with transition +-- tables should not prevent a table from becoming an inheritance child again +alter table child1 no inherit parent; +alter table child1 inherit parent; -- DML affecting parent sees tuples collected from children even if -- there is no transition table trigger on the children @@ -2653,6 +2716,7 @@ for each row execute procedure f(); create trigger parenttrig after insert on child for each row execute procedure f(); alter trigger parenttrig on parent rename to anothertrig; +-- \d+ child drop table parent, child; drop function f(); @@ -2660,8 +2724,8 @@ drop function f(); -- Test who runs deferred trigger functions -- setup -create role regress_groot; -create role regress_outis; +create role regress_caller; +create role regress_fn_owner; create function whoami() returns trigger language plpgsql as $$ begin @@ -2669,7 +2733,7 @@ begin return null; end; $$; -alter function whoami() owner to regress_outis; +alter function whoami() owner to regress_fn_owner; create table defer_trig (id integer); grant insert on defer_trig to public; @@ -2680,10 +2744,10 @@ create constraint trigger whoami after insert on defer_trig -- deferred triggers must run as the user that queued the trigger begin; -set role regress_groot; +set role regress_caller; insert into defer_trig values (1); reset role; -set role regress_outis; +set role regress_fn_owner; insert into defer_trig values (2); reset role; commit; @@ -2691,7 +2755,7 @@ commit; -- security definer functions override the user who queued the trigger alter function whoami() security definer; begin; -set role regress_groot; +set role regress_caller; insert into defer_trig values (3); reset role; commit; @@ -2708,7 +2772,7 @@ end; $$; begin; -set role regress_groot; +set role regress_caller; insert into defer_trig values (4); reset role; commit; -- error expected @@ -2717,5 +2781,5 @@ select current_user = session_user; -- clean up drop table defer_trig; drop function whoami(); -drop role regress_outis; -drop role regress_groot; +drop role regress_fn_owner; +drop role regress_caller; diff --git a/crates/squawk_parser/tests/data/regression_suite/tsearch.sql b/crates/squawk_parser/tests/data/regression_suite/tsearch.sql index c644a894..a603db25 100644 --- a/crates/squawk_parser/tests/data/regression_suite/tsearch.sql +++ b/crates/squawk_parser/tests/data/regression_suite/tsearch.sql @@ -1,4 +1,5 @@ -- directory paths are passed to us in environment variables +-- \getenv abs_srcdir PG_ABS_SRCDIR -- -- Sanity checks for text search catalogs @@ -47,6 +48,7 @@ CREATE TABLE test_tsvector( a tsvector ); +-- \set filename :abs_srcdir '/data/tsearch.data' COPY test_tsvector FROM 'filename'; ANALYZE test_tsvector; @@ -150,6 +152,7 @@ CREATE INDEX wowidx1 ON test_tsvector USING gist (a tsvector_ops(siglen=100, sig CREATE INDEX wowidx2 ON test_tsvector USING gist (a tsvector_ops(siglen=1)); +-- \d test_tsvector DROP INDEX wowidx; @@ -184,6 +187,7 @@ DROP INDEX wowidx2; CREATE INDEX wowidx ON test_tsvector USING gist (a tsvector_ops(siglen=484)); +-- \d test_tsvector EXPLAIN (costs off) SELECT count(*) FROM test_tsvector WHERE a @@ 'wr|qh'; @@ -645,12 +649,16 @@ SELECT ts_headline('english', --Rewrite sub system CREATE TABLE test_tsquery (txtkeyword TEXT, txtsample TEXT); +-- \set ECHO none +-- \copy test_tsquery from stdin -- 'New York' new <-> york | big <-> apple | nyc -- Moscow moskva | moscow -- 'Sanct Peter' Peterburg | peter | 'Sanct Peterburg' -- foo & bar & qq foo & (bar | qq) & city -- 
1 & (2 <-> 3) 2 <-> 4 -- 5 <-> 6 5 <-> 7 +-- \. +-- \set ECHO all ALTER TABLE test_tsquery ADD COLUMN keyword tsquery; UPDATE test_tsquery SET keyword = to_tsquery('english', txtkeyword); @@ -861,7 +869,7 @@ select websearch_to_tsquery('english', 'abc """"" def'); select websearch_to_tsquery('english', 'cat -"fat rat"'); select websearch_to_tsquery('english', 'cat -"fat rat" cheese'); select websearch_to_tsquery('english', 'abc "def -"'); -select websearch_to_tsquery('english', 'abc "def :"'); +select websearch_to_tsquery('english', 'abc "def "'); select websearch_to_tsquery('english', '"A fat cat" has just eaten a -rat.'); select websearch_to_tsquery('english', '"A fat cat" has just eaten OR !rat.'); diff --git a/crates/squawk_parser/tests/data/regression_suite/tuplesort.sql b/crates/squawk_parser/tests/data/regression_suite/tuplesort.sql index 00f57fab..c075dae8 100644 --- a/crates/squawk_parser/tests/data/regression_suite/tuplesort.sql +++ b/crates/squawk_parser/tests/data/regression_suite/tuplesort.sql @@ -293,15 +293,15 @@ SELECT $$ HAVING count(*) > 1 ORDER BY 2 DESC, 1 DESC, 3 DESC, 4 DESC, 5 DESC, 6 DESC LIMIT 10 -$$ AS qry ; +$$ AS qry /* \gset */; -- test mark/restore with in-memory sorts --- EXPLAIN (COSTS OFF) 'qry'; --- 'qry'; +-- EXPLAIN (COSTS OFF) :qry; +-- :qry; -- test mark/restore with on-disk sorts SET LOCAL work_mem = '100kB'; --- EXPLAIN (COSTS OFF) 'qry'; --- 'qry'; +-- EXPLAIN (COSTS OFF) :qry; +-- :qry; COMMIT; diff --git a/crates/squawk_parser/tests/data/regression_suite/txid.sql b/crates/squawk_parser/tests/data/regression_suite/txid.sql index 5e32f68f..d7abec40 100644 --- a/crates/squawk_parser/tests/data/regression_suite/txid.sql +++ b/crates/squawk_parser/tests/data/regression_suite/txid.sql @@ -59,21 +59,21 @@ SELECT txid_snapshot '1:9223372036854775808:3'; -- test txid_current_if_assigned BEGIN; SELECT txid_current_if_assigned() IS NULL; -SELECT txid_current() ; +SELECT txid_current() /* \gset */; SELECT txid_current_if_assigned() IS NOT DISTINCT FROM BIGINT 'txid_current'; COMMIT; -- test xid status functions BEGIN; -SELECT txid_current() AS committed ; +SELECT txid_current() AS committed /* \gset */; COMMIT; BEGIN; -SELECT txid_current() AS rolledback ; +SELECT txid_current() AS rolledback /* \gset */; ROLLBACK; BEGIN; -SELECT txid_current() AS inprogress ; +SELECT txid_current() AS inprogress /* \gset */; SELECT txid_status('committed') AS committed; SELECT txid_status('rolledback') AS rolledback; diff --git a/crates/squawk_parser/tests/data/regression_suite/type_sanity.sql b/crates/squawk_parser/tests/data/regression_suite/type_sanity.sql index 52cfac54..54d86712 100644 --- a/crates/squawk_parser/tests/data/regression_suite/type_sanity.sql +++ b/crates/squawk_parser/tests/data/regression_suite/type_sanity.sql @@ -13,7 +13,10 @@ -- field can't be 0, we have to check it here. 
-- directory paths and dlsuffix are passed to us in environment variables +-- \getenv libdir PG_LIBDIR +-- \getenv dlsuffix PG_DLSUFFIX +-- \set regresslib :libdir '/regress' :dlsuffix -- **************** pg_type **************** @@ -536,6 +539,7 @@ CREATE TABLE tab_core_types AS SELECT 'regtype'::regtype type, 'pg_monitor'::regrole, 'pg_class'::regclass::oid, + 'template1'::regdatabase, '(1,1)'::tid, '2'::xid, '3'::cid, '10:20:10,14,15'::txid_snapshot, '10:20:10,14,15'::pg_snapshot, diff --git a/crates/squawk_parser/tests/data/regression_suite/typed_table.sql b/crates/squawk_parser/tests/data/regression_suite/typed_table.sql index 7d4a0665..afb317a4 100644 --- a/crates/squawk_parser/tests/data/regression_suite/typed_table.sql +++ b/crates/squawk_parser/tests/data/regression_suite/typed_table.sql @@ -4,6 +4,7 @@ CREATE TYPE person_type AS (id int, name text); CREATE TABLE persons OF person_type; CREATE TABLE IF NOT EXISTS persons OF person_type; SELECT * FROM persons; +-- \d persons CREATE FUNCTION get_all_persons() RETURNS SETOF person_type LANGUAGE SQL @@ -28,12 +29,14 @@ CREATE TABLE persons2 OF person_type ( UNIQUE (name) ); +-- \d persons2 CREATE TABLE persons3 OF person_type ( PRIMARY KEY (id), name WITH OPTIONS DEFAULT '' ); +-- \d persons3 CREATE TABLE persons4 OF person_type ( name WITH OPTIONS NOT NULL, @@ -66,9 +69,11 @@ CREATE TABLE persons2 OF person_type ( UNIQUE (name) ); +-- \d persons2 CREATE TABLE persons3 OF person_type ( PRIMARY KEY (id), name NOT NULL DEFAULT '' ); +-- \d persons3 diff --git a/crates/squawk_parser/tests/data/regression_suite/unicode.sql b/crates/squawk_parser/tests/data/regression_suite/unicode.sql index 509eeaad..7a92781f 100644 --- a/crates/squawk_parser/tests/data/regression_suite/unicode.sql +++ b/crates/squawk_parser/tests/data/regression_suite/unicode.sql @@ -1,4 +1,7 @@ -SELECT getdatabaseencoding() <> 'UTF8' AS skip_test ; +SELECT getdatabaseencoding() <> 'UTF8' AS skip_test /* \gset */; +-- \if :skip_test +-- \quit +-- \endif SELECT U&'\0061\0308bc' <> U&'\00E4bc' COLLATE "C" AS sanity_check; diff --git a/crates/squawk_parser/tests/data/regression_suite/union.sql b/crates/squawk_parser/tests/data/regression_suite/union.sql index 13700a6b..d0c70faf 100644 --- a/crates/squawk_parser/tests/data/regression_suite/union.sql +++ b/crates/squawk_parser/tests/data/regression_suite/union.sql @@ -459,6 +459,78 @@ drop table events_child, events, other_events; reset enable_indexonlyscan; +-- +-- Test handling of UNION / EXCEPT / INTERSECT with provably empty inputs +-- + +-- Ensure the empty UNION input is pruned and de-duplication is done for the +-- remaining relation. +EXPLAIN (COSTS OFF, VERBOSE) +SELECT two FROM tenk1 WHERE 1=2 +UNION +SELECT four FROM tenk1 +ORDER BY 1; + +-- Validate that the results of the above are correct +SELECT two FROM tenk1 WHERE 1=2 +UNION +SELECT four FROM tenk1 +ORDER BY 1; + +-- All UNION inputs are proven empty. 
Ensure the planner provides a +-- const-false Result node +EXPLAIN (COSTS OFF, VERBOSE) +SELECT two FROM tenk1 WHERE 1=2 +UNION +SELECT four FROM tenk1 WHERE 1=2 +UNION +SELECT ten FROM tenk1 WHERE 1=2 +ORDER BY 1; + +-- Ensure the planner provides a const-false Result node +EXPLAIN (COSTS OFF, VERBOSE) +SELECT two FROM tenk1 WHERE 1=2 +INTERSECT +SELECT four FROM tenk1 +ORDER BY 1; + +-- As above, with the inputs swapped +EXPLAIN (COSTS OFF, VERBOSE) +SELECT four FROM tenk1 +INTERSECT +SELECT two FROM tenk1 WHERE 1=2 +ORDER BY 1; + +-- Try with both inputs dummy +EXPLAIN (COSTS OFF, VERBOSE) +SELECT four FROM tenk1 WHERE 1=2 +INTERSECT +SELECT two FROM tenk1 WHERE 1=2 +ORDER BY 1; + +-- Ensure the planner provides a const-false Result node when the left input +-- is empty +EXPLAIN (COSTS OFF, VERBOSE) +SELECT two FROM tenk1 WHERE 1=2 +EXCEPT +SELECT four FROM tenk1 +ORDER BY 1; + +-- Ensure the planner only scans the left input when right input is empty +EXPLAIN (COSTS OFF, VERBOSE) +SELECT two FROM tenk1 +EXCEPT ALL +SELECT four FROM tenk1 WHERE 1=2 +ORDER BY 1; + +-- Try a mixed setop case. Ensure the right-hand UNION child gets removed. +EXPLAIN (COSTS OFF, VERBOSE) +SELECT two FROM tenk1 t1 +EXCEPT +SELECT four FROM tenk1 t2 +UNION +SELECT ten FROM tenk1 dummy WHERE 1=2; + -- Test constraint exclusion of UNION ALL subqueries explain (costs off) SELECT * FROM diff --git a/crates/squawk_parser/tests/data/regression_suite/updatable_views.sql b/crates/squawk_parser/tests/data/regression_suite/updatable_views.sql index cb3e37a5..0ed98137 100644 --- a/crates/squawk_parser/tests/data/regression_suite/updatable_views.sql +++ b/crates/squawk_parser/tests/data/regression_suite/updatable_views.sql @@ -1262,6 +1262,7 @@ INSERT INTO base_tbl VALUES (1,2), (2,3), (1,-1); CREATE VIEW rw_view1 AS SELECT * FROM base_tbl WHERE a < b WITH LOCAL CHECK OPTION; +-- \d+ rw_view1 SELECT * FROM information_schema.views WHERE table_name = 'rw_view1'; INSERT INTO rw_view1 VALUES(3,4); -- ok @@ -1292,6 +1293,7 @@ CREATE TABLE base_tbl (a int); CREATE VIEW rw_view1 AS SELECT * FROM base_tbl WHERE a > 0; CREATE VIEW rw_view2 AS SELECT * FROM rw_view1 WHERE a < 10 WITH CHECK OPTION; -- implicitly cascaded +-- \d+ rw_view2 SELECT * FROM information_schema.views WHERE table_name = 'rw_view2'; INSERT INTO rw_view2 VALUES (-5); -- should fail @@ -1304,6 +1306,7 @@ UPDATE rw_view2 SET a = a + 10; -- should fail CREATE OR REPLACE VIEW rw_view2 AS SELECT * FROM rw_view1 WHERE a < 10 WITH LOCAL CHECK OPTION; +-- \d+ rw_view2 SELECT * FROM information_schema.views WHERE table_name = 'rw_view2'; INSERT INTO rw_view2 VALUES (-10); -- ok, but not in view @@ -1317,6 +1320,7 @@ INSERT INTO rw_view2 VALUES (-20); -- should fail INSERT INTO rw_view2 VALUES (30); -- should fail ALTER VIEW rw_view2 RESET (check_option); +-- \d+ rw_view2 SELECT * FROM information_schema.views WHERE table_name = 'rw_view2'; INSERT INTO rw_view2 VALUES (30); -- ok, but not in view SELECT * FROM base_tbl; diff --git a/crates/squawk_parser/tests/data/regression_suite/update.sql b/crates/squawk_parser/tests/data/regression_suite/update.sql index 2f1cba7b..058e32df 100644 --- a/crates/squawk_parser/tests/data/regression_suite/update.sql +++ b/crates/squawk_parser/tests/data/regression_suite/update.sql @@ -207,8 +207,10 @@ ALTER TABLE part_b_10_b_20 ATTACH PARTITION part_c_100_200 FOR VALUES FROM (100) CREATE TABLE part_c_1_100 (e varchar, d int, c numeric, b bigint, a text); ALTER TABLE part_b_10_b_20 ATTACH PARTITION part_c_1_100 FOR VALUES FROM (1) TO 
(100); --- 'init_range_parted'; --- 'show_data'; +-- \set init_range_parted 'truncate range_parted; insert into range_parted VALUES (''a'', 1, 1, 1), (''a'', 10, 200, 1), (''b'', 12, 96, 1), (''b'', 13, 97, 2), (''b'', 15, 105, 16), (''b'', 17, 105, 19)' +-- \set show_data 'select tableoid::regclass::text COLLATE "C" partname, * from range_parted ORDER BY 1, 2, 3, 4, 5, 6' +-- :init_range_parted; +-- :show_data; -- The order of subplans should be in bound order EXPLAIN (costs off) UPDATE range_parted set c = c - 50 WHERE c > 97; @@ -226,14 +228,14 @@ UPDATE range_parted set e = d; UPDATE part_c_1_100 set c = c + 20 WHERE c = 98; -- ok, row movement UPDATE part_b_10_b_20 set c = c + 20 returning c, b, a; --- 'show_data'; +-- :show_data; -- fail, row movement happens only within the partition subtree. UPDATE part_b_10_b_20 set b = b - 6 WHERE c > 116 returning *; -- ok, row movement, with subset of rows moved into different partition. UPDATE range_parted set b = b - 6 WHERE c > 116 returning a, b + c; --- 'show_data'; +-- :show_data; -- Common table needed for multiple test scenarios. CREATE TABLE mintab(c1 int); @@ -250,19 +252,19 @@ UPDATE upview set a = 'b', b = 15, c = 120 WHERE b = 4; -- ok, row movement, check option passes UPDATE upview set a = 'b', b = 15 WHERE b = 4; --- 'show_data'; +-- :show_data; -- cleanup DROP VIEW upview; -- RETURNING having whole-row vars. --- 'init_range_parted'; +-- :init_range_parted; UPDATE range_parted set c = 95 WHERE a = 'b' and b > 10 and c > 100 returning (range_parted), *; --- 'show_data'; +-- :show_data; -- Transition tables with update row movement --- 'init_range_parted'; +-- :init_range_parted; CREATE FUNCTION trans_updatetrigfunc() RETURNS trigger LANGUAGE plpgsql AS $$ @@ -280,8 +282,8 @@ CREATE TRIGGER trans_updatetrig FOR EACH STATEMENT EXECUTE PROCEDURE trans_updatetrigfunc(); UPDATE range_parted set c = (case when c = 96 then 110 else c + 1 end ) WHERE a = 'b' and b > 10 and c >= 96; --- 'show_data'; --- 'init_range_parted'; +-- :show_data; +-- :init_range_parted; -- Enabling OLD TABLE capture for both DELETE as well as UPDATE stmt triggers -- should not cause DELETEd rows to be captured twice. Similar thing for @@ -293,7 +295,7 @@ CREATE TRIGGER trans_inserttrig AFTER INSERT ON range_parted REFERENCING NEW TABLE AS new_table FOR EACH STATEMENT EXECUTE PROCEDURE trans_updatetrigfunc(); UPDATE range_parted set c = c + 50 WHERE a = 'b' and b > 10 and c >= 96; --- 'show_data'; +-- :show_data; DROP TRIGGER trans_deletetrig ON range_parted; DROP TRIGGER trans_inserttrig ON range_parted; -- Don't drop trans_updatetrig yet. It is required below. @@ -316,19 +318,19 @@ CREATE TRIGGER trig_d1_15 BEFORE UPDATE OR INSERT ON part_d_1_15 FOR EACH ROW EXECUTE PROCEDURE func_parted_mod_b(); CREATE TRIGGER trig_d15_20 BEFORE UPDATE OR INSERT ON part_d_15_20 FOR EACH ROW EXECUTE PROCEDURE func_parted_mod_b(); --- 'init_range_parted'; +-- :init_range_parted; UPDATE range_parted set c = (case when c = 96 then 110 else c + 1 end) WHERE a = 'b' and b > 10 and c >= 96; --- 'show_data'; --- 'init_range_parted'; +-- :show_data; +-- :init_range_parted; UPDATE range_parted set c = c + 50 WHERE a = 'b' and b > 10 and c >= 96; --- 'show_data'; +-- :show_data; -- Case where per-partition tuple conversion map array is allocated, but the -- map is not required for the particular tuple that is routed, thanks to -- matching table attributes of the partition and the target table. 
--- 'init_range_parted'; +-- :init_range_parted; UPDATE range_parted set b = 15 WHERE b = 1; --- 'show_data'; +-- :show_data; DROP TRIGGER trans_updatetrig ON range_parted; DROP TRIGGER trig_c1_100 ON part_c_1_100; @@ -345,7 +347,7 @@ GRANT ALL ON range_parted, mintab TO regress_range_parted_user; CREATE POLICY seeall ON range_parted AS PERMISSIVE FOR SELECT USING (true); CREATE POLICY policy_range_parted ON range_parted for UPDATE USING (true) WITH CHECK (c % 2 = 0); --- 'init_range_parted'; +-- :init_range_parted; SET SESSION AUTHORIZATION regress_range_parted_user; -- This should fail with RLS violation error while moving row from -- part_a_10_a_20 to part_d_1_15, because we are setting 'c' to an odd number. @@ -361,7 +363,7 @@ END $$ LANGUAGE plpgsql; CREATE TRIGGER trig_d_1_15 BEFORE INSERT ON part_d_1_15 FOR EACH ROW EXECUTE PROCEDURE func_d_1_15(); --- 'init_range_parted'; +-- :init_range_parted; SET SESSION AUTHORIZATION regress_range_parted_user; -- Here, RLS checks should succeed while moving row from part_a_10_a_20 to @@ -370,7 +372,7 @@ SET SESSION AUTHORIZATION regress_range_parted_user; UPDATE range_parted set a = 'b', c = 151 WHERE a = 'a' and c = 200; RESET SESSION AUTHORIZATION; --- 'init_range_parted'; +-- :init_range_parted; SET SESSION AUTHORIZATION regress_range_parted_user; -- This should fail with RLS violation error. Even though the UPDATE is setting -- 'c' to an even number, the trigger at the destination partition again makes @@ -384,7 +386,7 @@ DROP FUNCTION func_d_1_15(); -- Policy expression contains SubPlan RESET SESSION AUTHORIZATION; --- 'init_range_parted'; +-- :init_range_parted; CREATE POLICY policy_range_parted_subplan on range_parted AS RESTRICTIVE for UPDATE USING (true) WITH CHECK ((SELECT range_parted.c <= c1 FROM mintab)); @@ -397,14 +399,14 @@ UPDATE range_parted set a = 'b', c = 120 WHERE a = 'a' and c = 200; -- RLS policy expression contains whole row. RESET SESSION AUTHORIZATION; --- 'init_range_parted'; +-- :init_range_parted; CREATE POLICY policy_range_parted_wholerow on range_parted AS RESTRICTIVE for UPDATE USING (true) WITH CHECK (range_parted = row('b', 10, 112, 1, NULL)::range_parted); SET SESSION AUTHORIZATION regress_range_parted_user; -- ok, should pass the RLS check UPDATE range_parted set a = 'b', c = 112 WHERE a = 'a' and c = 200; RESET SESSION AUTHORIZATION; --- 'init_range_parted'; +-- :init_range_parted; SET SESSION AUTHORIZATION regress_range_parted_user; -- fail, the whole row RLS check should fail UPDATE range_parted set a = 'b', c = 116 WHERE a = 'a' and c = 200; @@ -422,7 +424,7 @@ DROP TABLE mintab; -- statement triggers with update row movement --------------------------------------------------- --- 'init_range_parted'; +-- :init_range_parted; CREATE FUNCTION trigfunc() returns trigger language plpgsql as $$ @@ -466,7 +468,7 @@ CREATE TRIGGER d15_insert_trig -- Move all rows from part_c_100_200 to part_c_1_100. None of the delete or -- insert statement triggers should be fired. 
UPDATE range_parted set c = c - 50 WHERE c > 97; --- 'show_data'; +-- :show_data; DROP TRIGGER parent_delete_trig ON range_parted; DROP TRIGGER parent_update_trig ON range_parted; @@ -483,15 +485,16 @@ DROP TRIGGER d15_insert_trig ON part_d_15_20; -- Creating default partition for range --- 'init_range_parted'; +-- :init_range_parted; create table part_def partition of range_parted default; +-- \d+ part_def insert into range_parted values ('c', 9); -- ok update part_def set a = 'd' where a = 'c'; -- fail update part_def set a = 'a' where a = 'd'; --- 'show_data'; +-- :show_data; -- Update row movement from non-default to default partition. -- fail, default partition is not under part_a_10_a_20; @@ -499,12 +502,12 @@ UPDATE part_a_10_a_20 set a = 'ad' WHERE a = 'a'; -- ok UPDATE range_parted set a = 'ad' WHERE a = 'a'; UPDATE range_parted set a = 'bd' WHERE a = 'b'; --- 'show_data'; +-- :show_data; -- Update row movement from default to non-default partitions. -- ok UPDATE range_parted set a = 'a' WHERE a = 'ad'; UPDATE range_parted set a = 'b' WHERE a = 'bd'; --- 'show_data'; +-- :show_data; -- Cleanup: range_parted no longer needed. DROP TABLE range_parted; diff --git a/crates/squawk_parser/tests/data/regression_suite/vacuum.sql b/crates/squawk_parser/tests/data/regression_suite/vacuum.sql index 45bd9147..ae76d533 100644 --- a/crates/squawk_parser/tests/data/regression_suite/vacuum.sql +++ b/crates/squawk_parser/tests/data/regression_suite/vacuum.sql @@ -324,8 +324,8 @@ DROP TABLE only_inh_parent CASCADE; -- parenthesized syntax for ANALYZE ANALYZE (VERBOSE) does_not_exist; --- ANALYZE (nonexistent-arg) does_not_exist; --- ANALYZE (nonexistentarg) does_not_exit; +ANALYZE (nonexistent-arg) does_not_exist; +ANALYZE (nonexistentarg) does_not_exit; -- ensure argument order independence, and that SKIP_LOCKED on non-existing -- relation still errors out. Suppress WARNING messages caused by concurrent @@ -379,9 +379,9 @@ VACUUM (PROCESS_MAIN FALSE, PROCESS_TOAST FALSE) vac_option_tab; SELECT * FROM vac_option_tab_counts; -- Check if the filenodes nodes have been updated as wanted after FULL. SELECT relfilenode AS main_filenode FROM pg_class - WHERE relname = 'vac_option_tab' ; + WHERE relname = 'vac_option_tab' /* \gset */; SELECT t.relfilenode AS toast_filenode FROM pg_class c, pg_class t - WHERE c.reltoastrelid = t.oid AND c.relname = 'vac_option_tab' ; + WHERE c.reltoastrelid = t.oid AND c.relname = 'vac_option_tab' /* \gset */; -- Only the toast relation is processed. VACUUM (PROCESS_MAIN FALSE, FULL) vac_option_tab; SELECT relfilenode = 'main_filenode' AS is_same_main_filenode @@ -495,3 +495,33 @@ RESET ROLE; DROP TABLE vacowned; DROP TABLE vacowned_parted; DROP ROLE regress_vacuum; + +-- Test checking how new toast values are allocated on rewrite. +-- Create table with plain storage (forces inline storage initially). +CREATE TABLE vac_rewrite_toast (id int, f1 TEXT STORAGE plain); +-- Insert tuple large enough to trigger toast storage on rewrite, still +-- small enough to fit on a page. +INSERT INTO vac_rewrite_toast values (1, repeat('a', 7000)); +-- Switch to external storage to force toast table usage. +ALTER TABLE vac_rewrite_toast ALTER COLUMN f1 SET STORAGE EXTERNAL; +-- This second tuple is toasted, its value should still be the +-- same after rewrite. +INSERT INTO vac_rewrite_toast values (2, repeat('a', 7000)); +SELECT pg_column_toast_chunk_id(f1) AS id_2_chunk FROM vac_rewrite_toast + WHERE id = 2 /* \gset */; +-- Check initial state of the data. 
+SELECT id, pg_column_toast_chunk_id(f1) IS NULL AS f1_chunk_null, + substr(f1, 5, 10) AS f1_data, + pg_column_compression(f1) AS f1_comp + FROM vac_rewrite_toast ORDER BY id; +-- VACUUM FULL forces toast data rewrite. +VACUUM FULL vac_rewrite_toast; +-- Check after rewrite. +SELECT id, pg_column_toast_chunk_id(f1) IS NULL AS f1_chunk_null, + substr(f1, 5, 10) AS f1_data, + pg_column_compression(f1) AS f1_comp + FROM vac_rewrite_toast ORDER BY id; +-- The same value is reused for the tuple toasted before the rewrite. +SELECT pg_column_toast_chunk_id(f1) = 'id_2_chunk' AS same_chunk + FROM vac_rewrite_toast WHERE id = 2; +DROP TABLE vac_rewrite_toast; diff --git a/crates/squawk_parser/tests/data/regression_suite/window.sql b/crates/squawk_parser/tests/data/regression_suite/window.sql index 66edb349..f088264e 100644 --- a/crates/squawk_parser/tests/data/regression_suite/window.sql +++ b/crates/squawk_parser/tests/data/regression_suite/window.sql @@ -1522,6 +1522,19 @@ SELECT * FROM FROM empsalary) emp WHERE first_emp = 1 OR last_emp = 1; +CREATE INDEX empsalary_salary_empno_idx ON empsalary (salary, empno); + +SET enable_seqscan = 0; + +-- Ensure no sorting is done and that the IndexScan maintains all pathkeys +-- useful for the final sort order. +EXPLAIN (COSTS OFF) +SELECT salary, empno, row_number() OVER (ORDER BY salary) rn +FROM empsalary +ORDER BY salary, empno; + +RESET enable_seqscan; + -- cleanup DROP TABLE empsalary; @@ -1958,3 +1971,165 @@ $$ LANGUAGE SQL STABLE; EXPLAIN (costs off) SELECT * FROM pg_temp.f(2); SELECT * FROM pg_temp.f(2); + +-- IGNORE NULLS tests + +CREATE TEMPORARY TABLE planets ( + name text, + distance text, + orbit integer +); + +INSERT INTO planets VALUES + ('mercury', 'close', 88), + ('venus', 'close', 224), + ('earth', 'close', NULL), + ('mars', 'close', NULL), + ('jupiter', 'close', 4332), + ('saturn', 'far', 24491), + ('uranus', 'far', NULL), + ('neptune', 'far', 60182), + ('pluto', 'far', 90560), + ('xyzzy', 'far', NULL); + +-- test ruleutils +CREATE VIEW planets_view AS +SELECT name, + orbit, + lag(orbit) OVER w AS lag, + lag(orbit) RESPECT NULLS OVER w AS lag_respect, + lag(orbit) IGNORE NULLS OVER w AS lag_ignore +FROM planets +WINDOW w AS (ORDER BY name) +; +SELECT pg_get_viewdef('planets_view'); + +-- lag +SELECT name, + orbit, + lag(orbit) OVER w AS lag, + lag(orbit) RESPECT NULLS OVER w AS lag_respect, + lag(orbit) IGNORE NULLS OVER w AS lag_ignore +FROM planets +WINDOW w AS (ORDER BY name) +; + +-- lead +SELECT name, + orbit, + lead(orbit) OVER w AS lead, + lead(orbit) RESPECT NULLS OVER w AS lead_respect, + lead(orbit) IGNORE NULLS OVER w AS lead_ignore +FROM planets +WINDOW w AS (ORDER BY name) +; + +-- first_value +SELECT name, + orbit, + first_value(orbit) RESPECT NULLS OVER w1, + first_value(orbit) IGNORE NULLS OVER w1, + first_value(orbit) RESPECT NULLS OVER w2, + first_value(orbit) IGNORE NULLS OVER w2 +FROM planets +WINDOW w1 AS (ORDER BY name ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING), + w2 AS (ORDER BY name ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING) +; + +-- nth_value +SELECT name, + orbit, + nth_value(orbit, 2) RESPECT NULLS OVER w1, + nth_value(orbit, 2) IGNORE NULLS OVER w1, + nth_value(orbit, 2) RESPECT NULLS OVER w2, + nth_value(orbit, 2) IGNORE NULLS OVER w2 +FROM planets +WINDOW w1 AS (ORDER BY name ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING), + w2 AS (ORDER BY name ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING) +; + +-- last_value +SELECT name, + orbit, + last_value(orbit) RESPECT NULLS OVER w1, + 
last_value(orbit) IGNORE NULLS OVER w1, + last_value(orbit) RESPECT NULLS OVER w2, + last_value(orbit) IGNORE NULLS OVER w2 +FROM planets +WINDOW w1 AS (ORDER BY name ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING), + w2 AS (ORDER BY name ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING) +; + +-- exclude current row +SELECT name, + orbit, + first_value(orbit) IGNORE NULLS OVER w, + last_value(orbit) IGNORE NULLS OVER w, + nth_value(orbit, 2) IGNORE NULLS OVER w, + lead(orbit, 1) IGNORE NULLS OVER w AS lead_ignore, + lag(orbit, 1) IGNORE NULLS OVER w AS lag_ignore +FROM planets +WINDOW w AS (ORDER BY name ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING EXCLUDE CURRENT ROW) +; + +-- valid and invalid functions +SELECT sum(orbit) OVER () FROM planets; -- succeeds +SELECT sum(orbit) RESPECT NULLS OVER () FROM planets; -- fails +SELECT sum(orbit) IGNORE NULLS OVER () FROM planets; -- fails +SELECT row_number() OVER () FROM planets; -- succeeds +SELECT row_number() RESPECT NULLS OVER () FROM planets; -- fails +SELECT row_number() IGNORE NULLS OVER () FROM planets; -- fails +SELECT rank() OVER () FROM planets; -- succeeds +SELECT rank() RESPECT NULLS OVER () FROM planets; -- fails +SELECT rank() IGNORE NULLS OVER () FROM planets; -- fails +SELECT dense_rank() OVER () FROM planets; -- succeeds +SELECT dense_rank() RESPECT NULLS OVER () FROM planets; -- fails +SELECT dense_rank() IGNORE NULLS OVER () FROM planets; -- fails +SELECT percent_rank() OVER () FROM planets; -- succeeds +SELECT percent_rank() RESPECT NULLS OVER () FROM planets; -- fails +SELECT percent_rank() IGNORE NULLS OVER () FROM planets; -- fails +SELECT cume_dist() OVER () FROM planets; -- succeeds +SELECT cume_dist() RESPECT NULLS OVER () FROM planets; -- fails +SELECT cume_dist() IGNORE NULLS OVER () FROM planets; -- fails +SELECT ntile(1) OVER () FROM planets; -- succeeds +SELECT ntile(1) RESPECT NULLS OVER () FROM planets; -- fails +SELECT ntile(1) IGNORE NULLS OVER () FROM planets; -- fails + +-- test two consecutive nulls +update planets set orbit=null where name='jupiter'; +SELECT name, + orbit, + first_value(orbit) IGNORE NULLS OVER w, + last_value(orbit) IGNORE NULLS OVER w, + nth_value(orbit, 2) IGNORE NULLS OVER w, + lead(orbit, 1) IGNORE NULLS OVER w AS lead_ignore, + lag(orbit, 1) IGNORE NULLS OVER w AS lag_ignore +FROM planets +WINDOW w AS (ORDER BY name ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING) +; + +-- test partitions +SELECT name, + distance, + orbit, + first_value(orbit) IGNORE NULLS OVER w, + last_value(orbit) IGNORE NULLS OVER w, + nth_value(orbit, 2) IGNORE NULLS OVER w, + lead(orbit, 1) IGNORE NULLS OVER w AS lead_ignore, + lag(orbit, 1) IGNORE NULLS OVER w AS lag_ignore +FROM planets +WINDOW w AS (PARTITION BY distance ORDER BY name ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING) +; + +-- nth_value without nulls +SELECT x, + nth_value(x,2) IGNORE NULLS OVER w +FROM generate_series(1,5) g(x) +WINDOW w AS (ORDER BY x ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING EXCLUDE CURRENT ROW); +SELECT x, + nth_value(x,2) IGNORE NULLS OVER w +FROM generate_series(1,5) g(x) +WINDOW w AS (ORDER BY x ROWS BETWEEN 2 PRECEDING AND 2 FOLLOWING); + +--cleanup +DROP TABLE planets CASCADE; diff --git a/crates/squawk_parser/tests/data/regression_suite/with.sql b/crates/squawk_parser/tests/data/regression_suite/with.sql index f811c447..8e1af0df 100644 --- a/crates/squawk_parser/tests/data/regression_suite/with.sql +++ b/crates/squawk_parser/tests/data/regression_suite/with.sql @@ -277,6 +277,7 @@ UNION ALL ) SELECT sum(n) FROM t; +-- \d+ sums_1_100 
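The commented "\d+ sums_1_100" line above follows the convention used across these fixtures: psql meta-commands (\d, \gset, \sv, \set, \x) are client-side psql features, not SQL, so the test preprocessor keeps them only as comments rather than dropping them. A sketch of the rewrite for the \gset form, where the "before" line is assumed from the upstream suite and the "after" line is the form that appears in the vacuum.sql and xid.sql hunks of this patch:

-- before preprocessing: psql stores the result into a variable, but the
-- trailing meta-command makes this line unparseable as plain SQL
--   SELECT pg_current_xact_id() AS committed \gset
-- after preprocessing: the statement parses, and the meta-command
-- survives only as a trailing comment
SELECT pg_current_xact_id() AS committed /* \gset */;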
-- corner case in which sub-WITH gets initialized first with recursive q as ( @@ -1096,6 +1097,30 @@ select ( with cte(foo) as ( values(f1) ) values((select foo from cte)) ) from int4_tbl; +-- +-- test for bug #19055: interaction of WITH with aggregates +-- +-- For now, we just throw an error if there's a use of a CTE below the +-- semantic level that the SQL standard assigns to the aggregate. +-- It's not entirely clear what we could do instead that doesn't risk +-- breaking more things than it fixes. +select f1, (with cte1(x,y) as (select 1,2) + select count((select i4.f1 from cte1))) as ss +from int4_tbl i4; + +-- +-- test for bug #19106: interaction of WITH with aggregates +-- +-- the initial fix for #19055 was too aggressive and broke this case +explain (verbose, costs off) +with a as ( select id from (values (1), (2)) as v(id) ), + b as ( select max((select sum(id) from a)) as agg ) +select agg from b; + +with a as ( select id from (values (1), (2)) as v(id) ), + b as ( select max((select sum(id) from a)) as agg ) +select agg from b; + -- -- test for nested-recursive-WITH bug -- @@ -1335,6 +1360,29 @@ COMMIT; SELECT * FROM bug6051_3; +-- check that recursive CTE processing doesn't rewrite a CTE more than once +-- (must not try to expand GENERATED ALWAYS IDENTITY columns more than once) +CREATE TEMP TABLE id_alw1 (i int GENERATED ALWAYS AS IDENTITY); + +CREATE TEMP TABLE id_alw2 (i int GENERATED ALWAYS AS IDENTITY); +CREATE TEMP VIEW id_alw2_view AS SELECT * FROM id_alw2; + +CREATE TEMP TABLE id_alw3 (i int GENERATED ALWAYS AS IDENTITY); +CREATE RULE id_alw3_ins AS ON INSERT TO id_alw3 DO INSTEAD + WITH t1 AS (INSERT INTO id_alw1 DEFAULT VALUES RETURNING i) + INSERT INTO id_alw2_view DEFAULT VALUES RETURNING i; +CREATE TEMP VIEW id_alw3_view AS SELECT * FROM id_alw3; + +CREATE TEMP TABLE id_alw4 (i int GENERATED ALWAYS AS IDENTITY); + +WITH t4 AS (INSERT INTO id_alw4 DEFAULT VALUES RETURNING i) + INSERT INTO id_alw3_view DEFAULT VALUES RETURNING i; + +SELECT * from id_alw1; +SELECT * from id_alw2; +SELECT * from id_alw3; +SELECT * from id_alw4; + -- check case where CTE reference is removed due to optimization EXPLAIN (VERBOSE, COSTS OFF) SELECT q1 FROM diff --git a/crates/squawk_parser/tests/data/regression_suite/without_overlaps.sql b/crates/squawk_parser/tests/data/regression_suite/without_overlaps.sql index 53c1e424..8a06646d 100644 --- a/crates/squawk_parser/tests/data/regression_suite/without_overlaps.sql +++ b/crates/squawk_parser/tests/data/regression_suite/without_overlaps.sql @@ -42,15 +42,18 @@ CREATE TABLE temporal_rng ( valid_at daterange, CONSTRAINT temporal_rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS) ); +-- \d temporal_rng SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conname = 'temporal_rng_pk'; SELECT pg_get_indexdef(conindid, 0, true) FROM pg_constraint WHERE conname = 'temporal_rng_pk'; -- PK from LIKE: CREATE TABLE temporal_rng2 (LIKE temporal_rng INCLUDING ALL); +-- \d temporal_rng2 DROP TABLE temporal_rng2; -- no PK from INHERITS: CREATE TABLE temporal_rng2 () INHERITS (temporal_rng); +-- \d temporal_rng2 DROP TABLE temporal_rng2; DROP TABLE temporal_rng; @@ -62,6 +65,7 @@ CREATE TABLE temporal_rng ( CREATE TABLE temporal_rng2 ( CONSTRAINT temporal_rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS) ) INHERITS (temporal_rng); +-- \d temporal_rng2 DROP TABLE temporal_rng CASCADE; -- Add PK to already inheriting table: @@ -72,6 +76,7 @@ CREATE TABLE temporal_rng ( CREATE TABLE temporal_rng2 () INHERITS (temporal_rng); ALTER TABLE temporal_rng2 ADD 
CONSTRAINT temporal_rng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS); +-- \d temporal_rng2 DROP TABLE temporal_rng2; DROP TABLE temporal_rng; @@ -82,6 +87,7 @@ CREATE TABLE temporal_rng2 ( valid_at daterange, CONSTRAINT temporal_rng2_pk PRIMARY KEY (id1, id2, valid_at WITHOUT OVERLAPS) ); +-- \d temporal_rng2 SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conname = 'temporal_rng2_pk'; SELECT pg_get_indexdef(conindid, 0, true) FROM pg_constraint WHERE conname = 'temporal_rng2_pk'; @@ -102,6 +108,7 @@ CREATE TABLE temporal_mltrng ( valid_at datemultirange, CONSTRAINT temporal_mltrng_pk PRIMARY KEY (id, valid_at WITHOUT OVERLAPS) ); +-- \d temporal_mltrng SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conname = 'temporal_mltrng_pk'; SELECT pg_get_indexdef(conindid, 0, true) FROM pg_constraint WHERE conname = 'temporal_mltrng_pk'; @@ -112,6 +119,7 @@ CREATE TABLE temporal_mltrng2 ( valid_at datemultirange, CONSTRAINT temporal_mltrng2_pk PRIMARY KEY (id1, id2, valid_at WITHOUT OVERLAPS) ); +-- \d temporal_mltrng2 SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conname = 'temporal_mltrng2_pk'; SELECT pg_get_indexdef(conindid, 0, true) FROM pg_constraint WHERE conname = 'temporal_mltrng2_pk'; @@ -144,6 +152,7 @@ CREATE TABLE temporal_rng3 ( valid_at daterange, CONSTRAINT temporal_rng3_uq UNIQUE (id, valid_at WITHOUT OVERLAPS) ); +-- \d temporal_rng3 SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conname = 'temporal_rng3_uq'; SELECT pg_get_indexdef(conindid, 0, true) FROM pg_constraint WHERE conname = 'temporal_rng3_uq'; DROP TABLE temporal_rng3; @@ -155,6 +164,7 @@ CREATE TABLE temporal_rng3 ( valid_at daterange, CONSTRAINT temporal_rng3_uq UNIQUE (id1, id2, valid_at WITHOUT OVERLAPS) ); +-- \d temporal_rng3 SELECT pg_get_constraintdef(oid) FROM pg_constraint WHERE conname = 'temporal_rng3_uq'; SELECT pg_get_indexdef(conindid, 0, true) FROM pg_constraint WHERE conname = 'temporal_rng3_uq'; DROP TABLE temporal_rng3; @@ -681,7 +691,9 @@ SELECT * FROM tp2 ORDER BY id, valid_at; DROP TABLE temporal_partitioned; -- ALTER TABLE REPLICA IDENTITY +-- \d temporal_rng ALTER TABLE temporal_rng REPLICA IDENTITY USING INDEX temporal_rng_pk; +-- \d temporal_rng -- -- ON CONFLICT: ranges @@ -1073,6 +1085,7 @@ CREATE TABLE temporal_fk2_rng2rng ( CONSTRAINT temporal_fk2_rng2rng_fk FOREIGN KEY (parent_id1, parent_id2, PERIOD valid_at) REFERENCES temporal_rng2 (id1, id2, PERIOD valid_at) ); +-- \d temporal_fk2_rng2rng DROP TABLE temporal_fk2_rng2rng; -- @@ -1101,6 +1114,7 @@ ALTER TABLE temporal_fk2_rng2rng ADD CONSTRAINT temporal_fk2_rng2rng_fk FOREIGN KEY (parent_id1, parent_id2, PERIOD valid_at) REFERENCES temporal_rng2 (id1, id2, PERIOD valid_at); +-- \d temporal_fk2_rng2rng -- with inferred PK on the referenced table, and wrong column type: ALTER TABLE temporal_fk_rng2rng @@ -1514,6 +1528,7 @@ CREATE TABLE temporal_fk2_mltrng2mltrng ( CONSTRAINT temporal_fk2_mltrng2mltrng_fk FOREIGN KEY (parent_id1, parent_id2, PERIOD valid_at) REFERENCES temporal_mltrng2 (id1, id2, PERIOD valid_at) ); +-- \d temporal_fk2_mltrng2mltrng DROP TABLE temporal_fk2_mltrng2mltrng; -- @@ -1544,6 +1559,7 @@ ALTER TABLE temporal_fk2_mltrng2mltrng ADD CONSTRAINT temporal_fk2_mltrng2mltrng_fk FOREIGN KEY (parent_id1, parent_id2, PERIOD valid_at) REFERENCES temporal_mltrng2 (id1, id2, PERIOD valid_at); +-- \d temporal_fk2_mltrng2mltrng -- should fail because of duplicate referenced columns: ALTER TABLE temporal_fk_mltrng2mltrng diff --git 
a/crates/squawk_parser/tests/data/regression_suite/xid.sql b/crates/squawk_parser/tests/data/regression_suite/xid.sql index eea33c04..ed2b7cd2 100644 --- a/crates/squawk_parser/tests/data/regression_suite/xid.sql +++ b/crates/squawk_parser/tests/data/regression_suite/xid.sql @@ -128,21 +128,21 @@ SELECT pg_snapshot '1:9223372036854775808:3'; -- test pg_current_xact_id_if_assigned BEGIN; SELECT pg_current_xact_id_if_assigned() IS NULL; -SELECT pg_current_xact_id() ; +SELECT pg_current_xact_id() /* \gset */; SELECT pg_current_xact_id_if_assigned() IS NOT DISTINCT FROM xid8 'pg_current_xact_id'; COMMIT; -- test xid status functions BEGIN; -SELECT pg_current_xact_id() AS committed ; +SELECT pg_current_xact_id() AS committed /* \gset */; COMMIT; BEGIN; -SELECT pg_current_xact_id() AS rolledback ; +SELECT pg_current_xact_id() AS rolledback /* \gset */; ROLLBACK; BEGIN; -SELECT pg_current_xact_id() AS inprogress ; +SELECT pg_current_xact_id() AS inprogress /* \gset */; SELECT pg_xact_status('committed'::text::xid8) AS committed; SELECT pg_xact_status('rolledback'::text::xid8) AS rolledback; diff --git a/crates/squawk_parser/tests/data/regression_suite/xml.sql b/crates/squawk_parser/tests/data/regression_suite/xml.sql index 7005013e..d459c817 100644 --- a/crates/squawk_parser/tests/data/regression_suite/xml.sql +++ b/crates/squawk_parser/tests/data/regression_suite/xml.sql @@ -346,7 +346,9 @@ SELECT xml_is_well_formed('abc'); -- attribute values. -- Since different libxml versions emit slightly different -- error messages, we suppress the DETAIL in this test. +-- \set VERBOSITY terse SELECT xpath('/*', ''); +-- \set VERBOSITY default -- Again, the XML isn't well-formed for namespace purposes SELECT xpath('/*', ''); @@ -425,6 +427,7 @@ CREATE VIEW xmltableview1 AS SELECT xmltable.* SELECT * FROM xmltableview1; +-- \sv xmltableview1 EXPLAIN (COSTS OFF) SELECT * FROM xmltableview1; EXPLAIN (COSTS OFF, VERBOSE) SELECT * FROM xmltableview1; @@ -447,6 +450,7 @@ CREATE VIEW xmltableview2 AS SELECT * FROM XMLTABLE(XMLNAMESPACES('http://x.y' A SELECT * FROM xmltableview2; +-- \sv xmltableview2 SELECT * FROM XMLTABLE(XMLNAMESPACES(DEFAULT 'http://x.y'), '/rows/row' @@ -663,7 +667,9 @@ SELECT xmltable.* FROM xmltest2, LATERAL xmltable(('/d/r/' || lower(_path) || 'c -- XPath result can be boolean or number too SELECT * FROM XMLTABLE('*' PASSING 'a' COLUMNS a xml PATH '.', b text PATH '.', c text PATH '"hi"', d boolean PATH '. = "a"', e integer PATH 'string-length(.)'); +-- \x SELECT * FROM XMLTABLE('*' PASSING 'pre&deeppost' COLUMNS x xml PATH '/e/n2', y xml PATH '/'); +-- \x SELECT * FROM XMLTABLE('.' 
PASSING XMLELEMENT(NAME a) columns a varchar(20) PATH '""', b xml PATH '""'); diff --git a/crates/squawk_parser/tests/snapshots/tests__alter_subscription_ok.snap b/crates/squawk_parser/tests/snapshots/tests__alter_subscription_ok.snap index e8085104..6868ffb3 100644 --- a/crates/squawk_parser/tests/snapshots/tests__alter_subscription_ok.snap +++ b/crates/squawk_parser/tests/snapshots/tests__alter_subscription_ok.snap @@ -315,7 +315,7 @@ SOURCE_FILE L_PAREN "(" ATTRIBUTE_OPTION NAME - IDENT "lsn" + LSN_KW "lsn" WHITESPACE " " EQ "=" WHITESPACE " " diff --git a/crates/squawk_parser/tests/snapshots/tests__copy_err.snap b/crates/squawk_parser/tests/snapshots/tests__copy_err.snap index 59a9c5eb..bde40001 100644 --- a/crates/squawk_parser/tests/snapshots/tests__copy_err.snap +++ b/crates/squawk_parser/tests/snapshots/tests__copy_err.snap @@ -29,17 +29,20 @@ SOURCE_FILE LITERAL STRING "'/tmp/input.file'" WHITESPACE " " - L_PAREN "(" - NAME - IDENT "on_error" - WHITESPACE " " - IDENT "ignore" - WHITESPACE " " - NAME - IDENT "log_verbosity" - WHITESPACE " " - VERBOSE_KW "verbose" - R_PAREN ")" + COPY_OPTION_LIST + L_PAREN "(" + COPY_OPTION + NAME + IDENT "on_error" + WHITESPACE " " + IGNORE_KW "ignore" + WHITESPACE " " + COPY_OPTION + NAME + IDENT "log_verbosity" + WHITESPACE " " + VERBOSE_KW "verbose" + R_PAREN ")" SEMICOLON ";" WHITESPACE "\n" --- diff --git a/crates/squawk_parser/tests/snapshots/tests__copy_ok.snap b/crates/squawk_parser/tests/snapshots/tests__copy_ok.snap index 05260e91..10d85084 100644 --- a/crates/squawk_parser/tests/snapshots/tests__copy_ok.snap +++ b/crates/squawk_parser/tests/snapshots/tests__copy_ok.snap @@ -33,13 +33,15 @@ SOURCE_FILE WHITESPACE " " STDOUT_KW "STDOUT" WHITESPACE " " - L_PAREN "(" - NAME - DELIMITER_KW "DELIMITER" - WHITESPACE " " - LITERAL - STRING "'|'" - R_PAREN ")" + COPY_OPTION_LIST + L_PAREN "(" + COPY_OPTION + NAME + DELIMITER_KW "DELIMITER" + WHITESPACE " " + LITERAL + STRING "'|'" + R_PAREN ")" SEMICOLON ";" WHITESPACE "\n\n" COMMENT "-- copy_from" @@ -143,20 +145,23 @@ SOURCE_FILE LITERAL STRING "'/tmp/input.file'" WHITESPACE " " - L_PAREN "(" - WHITESPACE " " - NAME - IDENT "on_error" - WHITESPACE " " - IDENT "ignore" - COMMA "," - WHITESPACE " " - NAME - IDENT "log_verbosity" - WHITESPACE " " - VERBOSE_KW "verbose" - WHITESPACE " " - R_PAREN ")" + COPY_OPTION_LIST + L_PAREN "(" + WHITESPACE " " + COPY_OPTION + NAME + IDENT "on_error" + WHITESPACE " " + IGNORE_KW "ignore" + COMMA "," + WHITESPACE " " + COPY_OPTION + NAME + IDENT "log_verbosity" + WHITESPACE " " + VERBOSE_KW "verbose" + WHITESPACE " " + R_PAREN ")" SEMICOLON ";" WHITESPACE "\n\n" COMMENT "-- on_error" @@ -174,14 +179,16 @@ SOURCE_FILE LITERAL STRING "'/tmp/copy.data'" WHITESPACE " " - L_PAREN "(" - WHITESPACE " " - NAME - IDENT "on_error" - WHITESPACE " " - IDENT "ignore" - WHITESPACE " " - R_PAREN ")" + COPY_OPTION_LIST + L_PAREN "(" + WHITESPACE " " + COPY_OPTION + NAME + IDENT "on_error" + WHITESPACE " " + IGNORE_KW "ignore" + WHITESPACE " " + R_PAREN ")" SEMICOLON ";" WHITESPACE "\n\n" COMMENT "-- all_the_options" @@ -199,176 +206,210 @@ SOURCE_FILE LITERAL STRING "'foo'" WHITESPACE " " - L_PAREN "(" - WHITESPACE "\n " - NAME - FORMAT_KW "format" - WHITESPACE " " - CSV_KW "csv" - COMMA "," - WHITESPACE "\n " - NAME - FREEZE_KW "freeze" - COMMA "," - WHITESPACE "\n " - NAME - FREEZE_KW "freeze" - WHITESPACE " " - LITERAL - TRUE_KW "true" - COMMA "," - WHITESPACE "\n " - NAME - FREEZE_KW "freeze" - WHITESPACE " " - LITERAL - FALSE_KW "false" - COMMA "," - WHITESPACE "\n " - 
NAME - DELIMITER_KW "delimiter" - WHITESPACE " " - LITERAL - STRING "','" - COMMA "," - WHITESPACE "\n " - NAME - NULL_KW "null" - WHITESPACE " " - LITERAL - STRING "'\\n'" - COMMA "," - WHITESPACE "\n " - NAME - DEFAULT_KW "default" - WHITESPACE " " - LITERAL - STRING "'foo'" - COMMA "," - WHITESPACE "\n " - NAME - HEADER_KW "header" - COMMA "," - WHITESPACE "\n " - NAME - HEADER_KW "header" - WHITESPACE " " - LITERAL - TRUE_KW "true" - COMMA "," - WHITESPACE "\n " - NAME - HEADER_KW "header" - WHITESPACE " " - LITERAL - FALSE_KW "false" - COMMA "," - WHITESPACE "\n " - NAME - HEADER_KW "header" - WHITESPACE " " - MATCH_KW "match" - COMMA "," - WHITESPACE "\n " - NAME - QUOTE_KW "quote" - WHITESPACE " " - LITERAL - STRING "'foo'" - COMMA "," - WHITESPACE "\n " - NAME - ESCAPE_KW "escape" - WHITESPACE " " - LITERAL - STRING "'bar'" - COMMA "," - WHITESPACE "\n " - NAME - IDENT "force_quote" - WHITESPACE " " - STAR "*" - COMMA "," - WHITESPACE "\n " - NAME - IDENT "force_quote" - WHITESPACE " " - L_PAREN "(" - NAME - IDENT "a" - COMMA "," - WHITESPACE " " - NAME - IDENT "b" - COMMA "," - WHITESPACE " " - NAME - IDENT "c" - COMMA "," - WHITESPACE " " - NAME - IDENT "d" - R_PAREN ")" - COMMA "," - WHITESPACE "\n " - NAME - IDENT "force_not_null" - WHITESPACE " " - STAR "*" - COMMA "," - WHITESPACE "\n " - NAME - IDENT "force_not_null" - WHITESPACE " " - L_PAREN "(" - NAME - IDENT "a" - R_PAREN ")" - COMMA "," - WHITESPACE "\n " - NAME - IDENT "force_null" - WHITESPACE " " - STAR "*" - COMMA "," - WHITESPACE "\n " - NAME - IDENT "force_null" - WHITESPACE " " - L_PAREN "(" - NAME - IDENT "a" - COMMA "," - WHITESPACE " " - NAME - IDENT "b" - R_PAREN ")" - COMMA "," - WHITESPACE "\n " - NAME - IDENT "on_error" - WHITESPACE " " - IDENT "stop" - COMMA "," - WHITESPACE "\n " - NAME - IDENT "on_error" - WHITESPACE " " - IDENT "ignore" - COMMA "," - WHITESPACE "\n " - NAME - ENCODING_KW "encoding" - WHITESPACE " " - LITERAL - STRING "'utf8'" - COMMA "," - WHITESPACE "\n " - NAME - IDENT "log_verbosity" - WHITESPACE " " - VERBOSE_KW "verbose" - WHITESPACE "\n" - R_PAREN ")" + COPY_OPTION_LIST + L_PAREN "(" + WHITESPACE "\n " + COPY_OPTION + NAME + FORMAT_KW "format" + WHITESPACE " " + CSV_KW "csv" + COMMA "," + WHITESPACE "\n " + COPY_OPTION + NAME + FREEZE_KW "freeze" + COMMA "," + WHITESPACE "\n " + COPY_OPTION + NAME + FREEZE_KW "freeze" + WHITESPACE " " + LITERAL + TRUE_KW "true" + COMMA "," + WHITESPACE "\n " + COPY_OPTION + NAME + FREEZE_KW "freeze" + WHITESPACE " " + LITERAL + FALSE_KW "false" + COMMA "," + WHITESPACE "\n " + COPY_OPTION + NAME + DELIMITER_KW "delimiter" + WHITESPACE " " + LITERAL + STRING "','" + COMMA "," + WHITESPACE "\n " + COPY_OPTION + NAME + NULL_KW "null" + WHITESPACE " " + LITERAL + STRING "'\\n'" + COMMA "," + WHITESPACE "\n " + COPY_OPTION + NAME + DEFAULT_KW "default" + WHITESPACE " " + LITERAL + STRING "'foo'" + COMMA "," + WHITESPACE "\n " + COPY_OPTION + NAME + HEADER_KW "header" + COMMA "," + WHITESPACE "\n " + COPY_OPTION + NAME + HEADER_KW "header" + WHITESPACE " " + LITERAL + TRUE_KW "true" + COMMA "," + WHITESPACE "\n " + COPY_OPTION + NAME + HEADER_KW "header" + WHITESPACE " " + LITERAL + FALSE_KW "false" + COMMA "," + WHITESPACE "\n " + COPY_OPTION + NAME + HEADER_KW "header" + WHITESPACE " " + MATCH_KW "match" + COMMA "," + WHITESPACE "\n " + COPY_OPTION + NAME + QUOTE_KW "quote" + WHITESPACE " " + LITERAL + STRING "'foo'" + COMMA "," + WHITESPACE "\n " + COPY_OPTION + NAME + ESCAPE_KW "escape" + WHITESPACE " " + LITERAL + STRING "'bar'" + COMMA "," + 
WHITESPACE "\n " + COPY_OPTION + NAME + IDENT "force_quote" + WHITESPACE " " + STAR "*" + COMMA "," + WHITESPACE "\n " + COPY_OPTION + NAME + IDENT "force_quote" + WHITESPACE " " + COPY_OPTION_LIST + L_PAREN "(" + COPY_OPTION + NAME + IDENT "a" + COMMA "," + WHITESPACE " " + COPY_OPTION + NAME + IDENT "b" + COMMA "," + WHITESPACE " " + COPY_OPTION + NAME + IDENT "c" + COMMA "," + WHITESPACE " " + COPY_OPTION + NAME + IDENT "d" + R_PAREN ")" + COMMA "," + WHITESPACE "\n " + COPY_OPTION + NAME + IDENT "force_not_null" + WHITESPACE " " + STAR "*" + COMMA "," + WHITESPACE "\n " + COPY_OPTION + NAME + IDENT "force_not_null" + WHITESPACE " " + COPY_OPTION_LIST + L_PAREN "(" + COPY_OPTION + NAME + IDENT "a" + R_PAREN ")" + COMMA "," + WHITESPACE "\n " + COPY_OPTION + NAME + IDENT "force_null" + WHITESPACE " " + STAR "*" + COMMA "," + WHITESPACE "\n " + COPY_OPTION + NAME + IDENT "force_null" + WHITESPACE " " + COPY_OPTION_LIST + L_PAREN "(" + COPY_OPTION + NAME + IDENT "a" + COMMA "," + WHITESPACE " " + COPY_OPTION + NAME + IDENT "b" + R_PAREN ")" + COMMA "," + WHITESPACE "\n " + COPY_OPTION + NAME + IDENT "on_error" + WHITESPACE " " + IDENT "stop" + COMMA "," + WHITESPACE "\n " + COPY_OPTION + NAME + IDENT "on_error" + WHITESPACE " " + IGNORE_KW "ignore" + COMMA "," + WHITESPACE "\n " + COPY_OPTION + NAME + ENCODING_KW "encoding" + WHITESPACE " " + LITERAL + STRING "'utf8'" + COMMA "," + WHITESPACE "\n " + COPY_OPTION + NAME + IDENT "log_verbosity" + WHITESPACE " " + VERBOSE_KW "verbose" + WHITESPACE "\n" + R_PAREN ")" SEMICOLON ";" WHITESPACE "\n\n" diff --git a/crates/squawk_parser/tests/snapshots/tests__misc_ok.snap b/crates/squawk_parser/tests/snapshots/tests__misc_ok.snap index da29372c..02a1a1fc 100644 --- a/crates/squawk_parser/tests/snapshots/tests__misc_ok.snap +++ b/crates/squawk_parser/tests/snapshots/tests__misc_ok.snap @@ -2014,21 +2014,24 @@ SOURCE_FILE WHITESPACE "\n " WITH_KW "WITH" WHITESPACE " " - L_PAREN "(" - WHITESPACE "\n " - NAME - FORMAT_KW "FORMAT" - WHITESPACE " " - CSV_KW "csv" - COMMA "," - WHITESPACE "\n " - NAME - DELIMITER_KW "DELIMITER" - WHITESPACE " " - LITERAL - ESC_STRING "E'\\t'" - WHITESPACE "\n " - R_PAREN ")" + COPY_OPTION_LIST + L_PAREN "(" + WHITESPACE "\n " + COPY_OPTION + NAME + FORMAT_KW "FORMAT" + WHITESPACE " " + CSV_KW "csv" + COMMA "," + WHITESPACE "\n " + COPY_OPTION + NAME + DELIMITER_KW "DELIMITER" + WHITESPACE " " + LITERAL + ESC_STRING "E'\\t'" + WHITESPACE "\n " + R_PAREN ")" SEMICOLON ";" WHITESPACE "\n\n" COMMENT "-- Enable pgvector" diff --git a/crates/squawk_parser/tests/snapshots/tests__regression_compression_lz4.snap b/crates/squawk_parser/tests/snapshots/tests__regression_compression_lz4.snap new file mode 100644 index 00000000..92334982 --- /dev/null +++ b/crates/squawk_parser/tests/snapshots/tests__regression_compression_lz4.snap @@ -0,0 +1,5 @@ +--- +source: crates/squawk_parser/tests/tests.rs +input_file: crates/squawk_parser/tests/data/regression_suite/compression_lz4.sql +--- + diff --git a/crates/squawk_parser/tests/snapshots/tests__regression_eager_aggregate.snap b/crates/squawk_parser/tests/snapshots/tests__regression_eager_aggregate.snap new file mode 100644 index 00000000..82fc82fd --- /dev/null +++ b/crates/squawk_parser/tests/snapshots/tests__regression_eager_aggregate.snap @@ -0,0 +1,5 @@ +--- +source: crates/squawk_parser/tests/tests.rs +input_file: crates/squawk_parser/tests/data/regression_suite/eager_aggregate.sql +--- + diff --git a/crates/squawk_parser/tests/snapshots/tests__regression_errors.snap 
b/crates/squawk_parser/tests/snapshots/tests__regression_errors.snap index bb2f317b..6fd39f86 100644 --- a/crates/squawk_parser/tests/snapshots/tests__regression_errors.snap +++ b/crates/squawk_parser/tests/snapshots/tests__regression_errors.snap @@ -2,4 +2,460 @@ source: crates/squawk_parser/tests/tests.rs input_file: crates/squawk_parser/tests/data/regression_suite/errors.sql --- - +--- +error[syntax-error]: expected relation name + ╭▸ +49 │ delete from; + ╰╴ ━ +error[syntax-error]: expected path name + ╭▸ +131 │ drop index; + ╰╴ ━ +error[syntax-error]: expected path name + ╭▸ +134 │ drop index 314159; + ╰╴ ━ +error[syntax-error]: expected SEMICOLON + ╭▸ +134 │ drop index 314159; + ╰╴ ━ +error[syntax-error]: expected command, found INT_NUMBER + ╭▸ +134 │ drop index 314159; + ╰╴ ━ +error[syntax-error]: expected path name + ╭▸ +144 │ drop aggregate; + ╰╴ ━ +error[syntax-error]: expected L_PAREN + ╭▸ +144 │ drop aggregate; + ╰╴ ━ +error[syntax-error]: expected type name + ╭▸ +144 │ drop aggregate; + ╰╴ ━ +error[syntax-error]: expected R_PAREN + ╭▸ +144 │ drop aggregate; + ╰╴ ━ +error[syntax-error]: expected L_PAREN + ╭▸ +147 │ drop aggregate newcnt1; + ╰╴ ━ +error[syntax-error]: expected type name + ╭▸ +147 │ drop aggregate newcnt1; + ╰╴ ━ +error[syntax-error]: expected R_PAREN + ╭▸ +147 │ drop aggregate newcnt1; + ╰╴ ━ +error[syntax-error]: expected path name + ╭▸ +150 │ drop aggregate 314159 (int); + ╰╴ ━ +error[syntax-error]: expected L_PAREN + ╭▸ +150 │ drop aggregate 314159 (int); + ╰╴ ━ +error[syntax-error]: expected type name + ╭▸ +150 │ drop aggregate 314159 (int); + ╰╴ ━ +error[syntax-error]: expected R_PAREN + ╭▸ +150 │ drop aggregate 314159 (int); + ╰╴ ━ +error[syntax-error]: expected SEMICOLON + ╭▸ +150 │ drop aggregate 314159 (int); + ╰╴ ━ +error[syntax-error]: expected command, found INT_NUMBER + ╭▸ +150 │ drop aggregate 314159 (int); + ╰╴ ━ +error[syntax-error]: expected R_PAREN + ╭▸ +150 │ drop aggregate 314159 (int); + ╰╴ ━ +error[syntax-error]: expected SEMICOLON + ╭▸ +150 │ drop aggregate 314159 (int); + ╰╴ ━ +error[syntax-error]: expected command, found INT_KW + ╭▸ +150 │ drop aggregate 314159 (int); + ╰╴ ━ +error[syntax-error]: expected command, found R_PAREN + ╭▸ +150 │ drop aggregate 314159 (int); + ╰╴ ━ +error[syntax-error]: expected path name + ╭▸ +166 │ drop function (); + ╰╴ ━ +error[syntax-error]: expected path name + ╭▸ +169 │ drop function 314159(); + ╰╴ ━ +error[syntax-error]: expected SEMICOLON + ╭▸ +169 │ drop function 314159(); + ╰╴ ━ +error[syntax-error]: expected command, found INT_NUMBER + ╭▸ +169 │ drop function 314159(); + ╰╴ ━ +error[syntax-error]: expected path name + ╭▸ +179 │ drop type; + ╰╴ ━ +error[syntax-error]: expected path name + ╭▸ +182 │ drop type 314159; + ╰╴ ━ +error[syntax-error]: expected SEMICOLON + ╭▸ +182 │ drop type 314159; + ╰╴ ━ +error[syntax-error]: expected command, found INT_NUMBER + ╭▸ +182 │ drop type 314159; + ╰╴ ━ +error[syntax-error]: expected operator, got SEMICOLON + ╭▸ +192 │ drop operator; + ╰╴ ━ +error[syntax-error]: expected L_PAREN + ╭▸ +192 │ drop operator; + ╰╴ ━ +error[syntax-error]: expected type name + ╭▸ +192 │ drop operator; + ╰╴ ━ +error[syntax-error]: expected COMMA + ╭▸ +192 │ drop operator; + ╰╴ ━ +error[syntax-error]: expected type name + ╭▸ +192 │ drop operator; + ╰╴ ━ +error[syntax-error]: expected R_PAREN + ╭▸ +192 │ drop operator; + ╰╴ ━ +error[syntax-error]: expected operator, got SEMICOLON + ╭▸ +195 │ drop operator equals; + ╰╴ ━ +error[syntax-error]: expected L_PAREN + ╭▸ +195 │ drop operator 
equals; + ╰╴ ━ +error[syntax-error]: expected type name + ╭▸ +195 │ drop operator equals; + ╰╴ ━ +error[syntax-error]: expected COMMA + ╭▸ +195 │ drop operator equals; + ╰╴ ━ +error[syntax-error]: expected type name + ╭▸ +195 │ drop operator equals; + ╰╴ ━ +error[syntax-error]: expected R_PAREN + ╭▸ +195 │ drop operator equals; + ╰╴ ━ +error[syntax-error]: expected L_PAREN + ╭▸ +198 │ drop operator ===; + ╰╴ ━ +error[syntax-error]: expected type name + ╭▸ +198 │ drop operator ===; + ╰╴ ━ +error[syntax-error]: expected COMMA + ╭▸ +198 │ drop operator ===; + ╰╴ ━ +error[syntax-error]: expected type name + ╭▸ +198 │ drop operator ===; + ╰╴ ━ +error[syntax-error]: expected R_PAREN + ╭▸ +198 │ drop operator ===; + ╰╴ ━ +error[syntax-error]: expected operator, got COMMA + ╭▸ +201 │ drop operator int4, int4; + ╰╴ ━ +error[syntax-error]: expected L_PAREN + ╭▸ +201 │ drop operator int4, int4; + ╰╴ ━ +error[syntax-error]: expected type name + ╭▸ +201 │ drop operator int4, int4; + ╰╴ ━ +error[syntax-error]: expected R_PAREN + ╭▸ +201 │ drop operator int4, int4; + ╰╴ ━ +error[syntax-error]: expected operator, got L_PAREN + ╭▸ +204 │ drop operator (int4, int4); + ╰╴ ━ +error[syntax-error]: expected type name + ╭▸ +207 │ drop operator === (); + ╰╴ ━ +error[syntax-error]: expected COMMA + ╭▸ +207 │ drop operator === (); + ╰╴ ━ +error[syntax-error]: expected type name + ╭▸ +207 │ drop operator === (); + ╰╴ ━ +error[syntax-error]: expected COMMA + ╭▸ +210 │ drop operator === (int4); + ╰╴ ━ +error[syntax-error]: expected type name + ╭▸ +210 │ drop operator === (int4); + ╰╴ ━ +error[syntax-error]: expected COMMA + ╭▸ +216 │ drop operator = (nonesuch); + ╰╴ ━ +error[syntax-error]: expected type name + ╭▸ +216 │ drop operator = (nonesuch); + ╰╴ ━ +error[syntax-error]: expected type name + ╭▸ +219 │ drop operator = ( , int4); + ╰╴ ━ +error[syntax-error]: expected type name + ╭▸ +228 │ drop operator = (int4, ); + ╰╴ ━ +error[syntax-error]: expected name + ╭▸ +235 │ drop rule; + ╰╴ ━ +error[syntax-error]: expected ON_KW + ╭▸ +235 │ drop rule; + ╰╴ ━ +error[syntax-error]: expected path name + ╭▸ +235 │ drop rule; + ╰╴ ━ +error[syntax-error]: expected name + ╭▸ +238 │ drop rule 314159; + ╰╴ ━ +error[syntax-error]: expected ON_KW + ╭▸ +238 │ drop rule 314159; + ╰╴ ━ +error[syntax-error]: expected path name + ╭▸ +238 │ drop rule 314159; + ╰╴ ━ +error[syntax-error]: expected SEMICOLON + ╭▸ +238 │ drop rule 314159; + ╰╴ ━ +error[syntax-error]: expected command, found INT_NUMBER + ╭▸ +238 │ drop rule 314159; + ╰╴ ━ +error[syntax-error]: expected command, found DROP_KW + ╭▸ +244 │ drop tuple rule nonesuch; + ╰╴━ +error[syntax-error]: expected command, found IDENT + ╭▸ +244 │ drop tuple rule nonesuch; + ╰╴ ━ +error[syntax-error]: expected command, found RULE_KW + ╭▸ +244 │ drop tuple rule nonesuch; + ╰╴ ━ +error[syntax-error]: expected command, found IDENT + ╭▸ +244 │ drop tuple rule nonesuch; + ╰╴ ━ +error[syntax-error]: expected command, found DROP_KW + ╭▸ +245 │ drop instance rule nonesuch on noplace; + ╰╴━ +error[syntax-error]: expected command, found IDENT + ╭▸ +245 │ drop instance rule nonesuch on noplace; + ╰╴ ━ +error[syntax-error]: expected command, found RULE_KW + ╭▸ +245 │ drop instance rule nonesuch on noplace; + ╰╴ ━ +error[syntax-error]: expected command, found IDENT + ╭▸ +245 │ drop instance rule nonesuch on noplace; + ╰╴ ━ +error[syntax-error]: expected command, found ON_KW + ╭▸ +245 │ drop instance rule nonesuch on noplace; + ╰╴ ━ +error[syntax-error]: expected command, found IDENT + ╭▸ +245 │ drop 
instance rule nonesuch on noplace; + ╰╴ ━ +error[syntax-error]: expected command, found DROP_KW + ╭▸ +246 │ drop rewrite rule nonesuch; + ╰╴━ +error[syntax-error]: expected command, found IDENT + ╭▸ +246 │ drop rewrite rule nonesuch; + ╰╴ ━ +error[syntax-error]: expected command, found RULE_KW + ╭▸ +246 │ drop rewrite rule nonesuch; + ╰╴ ━ +error[syntax-error]: expected command, found IDENT + ╭▸ +246 │ drop rewrite rule nonesuch; + ╰╴ ━ +error[syntax-error]: expected command, found IDENT + ╭▸ +279 │ xxx; + ╰╴━ +error[syntax-error]: expected command, found CREATE_KW + ╭▸ +281 │ CREATE foo; + ╰╴━ +error[syntax-error]: expected command, found IDENT + ╭▸ +281 │ CREATE foo; + ╰╴ ━ +error[syntax-error]: expected path name + ╭▸ +283 │ CREATE TABLE ; + ╰╴ ━ +error[syntax-error]: expected SEMICOLON + ╭▸ +288 │ INSERT INTO foo VALUES(123) foo; + ╰╴ ━ +error[syntax-error]: expected command, found INTO_KW + ╭▸ +288 │ INSERT INTO foo VALUES(123) foo; + ╰╴ ━ +error[syntax-error]: expected command, found IDENT + ╭▸ +288 │ INSERT INTO foo VALUES(123) foo; + ╰╴ ━ +error[syntax-error]: expected SEMICOLON + ╭▸ +288 │ INSERT INTO foo VALUES(123) foo; + ╰╴ ━ +error[syntax-error]: expected command, found IDENT + ╭▸ +288 │ INSERT INTO foo VALUES(123) foo; + ╰╴ ━ +error[syntax-error]: expected path name + ╭▸ +290 │ INSERT INTO 123 + ╰╴ ━ +error[syntax-error]: expected select stmt + ╭▸ +290 │ INSERT INTO 123 + ╰╴ ━ +error[syntax-error]: expected SEMICOLON + ╭▸ +290 │ INSERT INTO 123 + ╰╴ ━ +error[syntax-error]: expected command, found INT_NUMBER + ╭▸ +290 │ INSERT INTO 123 + ╰╴ ━ +error[syntax-error]: expected SEMICOLON + ╭▸ +294 │ VALUES(123) 123 + ╰╴ ━ +error[syntax-error]: expected command, found INT_NUMBER + ╭▸ +294 │ VALUES(123) 123 + ╰╴ ━ +error[syntax-error]: expected NULL_KW + ╭▸ +300 │ id3 INTEGER NOT NUL, + ╰╴ ━ +error[syntax-error]: expected COMMA + ╭▸ +300 │ id3 INTEGER NOT NUL, + ╰╴ ━ +error[syntax-error]: expected NULL_KW + ╭▸ +304 │ CREATE TABLE foo(id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, + ╰╴ ━ +error[syntax-error]: expected COMMA + ╭▸ +304 │ CREATE TABLE foo(id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, + ╰╴ ━ +error[syntax-error]: expected NULL_KW + ╭▸ +309 │ id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL, id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY); + ╰╴ ━ +error[syntax-error]: expected COMMA + ╭▸ +309 │ id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL, id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY); + ╰╴ ━ +error[syntax-error]: expected NULL_KW + ╭▸ +312 │ CREATE TABLE foo(id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQ… + ╰╴ ━ +error[syntax-error]: expected COMMA + ╭▸ +312 │ CREATE TABLE foo(id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQ… + ╰╴ ━ +error[syntax-error]: expected NULL_KW + ╭▸ +318 │ foo(id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, + ╰╴ ━ +error[syntax-error]: expected COMMA + ╭▸ +318 │ foo(id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, + ╰╴ ━ +error[syntax-error]: expected NULL_KW + ╭▸ +334 │ id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL, id INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY) + ╰╴ ━ +error[syntax-error]: expected COMMA + ╭▸ +334 │ id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5 TEXT UNIQUE NOT NULL, id 
INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY) + ╰╴ ━ +error[syntax-error]: expected NULL_KW + ╭▸ +344 │ UNIQUE NOT NULL, idx INT4 UNIQUE NOT NULL, idy INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 INT4 UNI… + ╰╴ ━ +error[syntax-error]: expected COMMA + ╭▸ +344 │ UNIQUE NOT NULL, idx INT4 UNIQUE NOT NULL, idy INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 INT4 UNI… + ╰╴ ━ +error[syntax-error]: expected NULL_KW + ╭▸ +364 │ idx INT4 UNIQUE NOT NULL, idy INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5… + ╰╴ ━ +error[syntax-error]: expected COMMA + ╭▸ +364 │ idx INT4 UNIQUE NOT NULL, idy INT4 UNIQUE NOT NULL, id2 TEXT NOT NULL PRIMARY KEY, id3 INTEGER NOT NUL, id4 INT4 UNIQUE NOT NULL, id5… + ╰╴ ━ diff --git a/crates/squawk_parser/tests/snapshots/tests__regression_nls.snap b/crates/squawk_parser/tests/snapshots/tests__regression_nls.snap new file mode 100644 index 00000000..c7ffb2c4 --- /dev/null +++ b/crates/squawk_parser/tests/snapshots/tests__regression_nls.snap @@ -0,0 +1,5 @@ +--- +source: crates/squawk_parser/tests/tests.rs +input_file: crates/squawk_parser/tests/data/regression_suite/nls.sql +--- + diff --git a/crates/squawk_parser/tests/snapshots/tests__regression_partition_merge.snap b/crates/squawk_parser/tests/snapshots/tests__regression_partition_merge.snap new file mode 100644 index 00000000..69347c72 --- /dev/null +++ b/crates/squawk_parser/tests/snapshots/tests__regression_partition_merge.snap @@ -0,0 +1,5 @@ +--- +source: crates/squawk_parser/tests/tests.rs +input_file: crates/squawk_parser/tests/data/regression_suite/partition_merge.sql +--- + diff --git a/crates/squawk_parser/tests/snapshots/tests__regression_partition_split.snap b/crates/squawk_parser/tests/snapshots/tests__regression_partition_split.snap new file mode 100644 index 00000000..7e5b1bde --- /dev/null +++ b/crates/squawk_parser/tests/snapshots/tests__regression_partition_split.snap @@ -0,0 +1,5 @@ +--- +source: crates/squawk_parser/tests/tests.rs +input_file: crates/squawk_parser/tests/data/regression_suite/partition_split.sql +--- + diff --git a/crates/squawk_parser/tests/snapshots/tests__regression_pg_dependencies.snap b/crates/squawk_parser/tests/snapshots/tests__regression_pg_dependencies.snap new file mode 100644 index 00000000..d18c929f --- /dev/null +++ b/crates/squawk_parser/tests/snapshots/tests__regression_pg_dependencies.snap @@ -0,0 +1,5 @@ +--- +source: crates/squawk_parser/tests/tests.rs +input_file: crates/squawk_parser/tests/data/regression_suite/pg_dependencies.sql +--- + diff --git a/crates/squawk_parser/tests/snapshots/tests__regression_pg_ndistinct.snap b/crates/squawk_parser/tests/snapshots/tests__regression_pg_ndistinct.snap new file mode 100644 index 00000000..dc1fe49d --- /dev/null +++ b/crates/squawk_parser/tests/snapshots/tests__regression_pg_ndistinct.snap @@ -0,0 +1,5 @@ +--- +source: crates/squawk_parser/tests/tests.rs +input_file: crates/squawk_parser/tests/data/regression_suite/pg_ndistinct.sql +--- + diff --git a/crates/squawk_parser/tests/snapshots/tests__regression_stats_rewrite.snap b/crates/squawk_parser/tests/snapshots/tests__regression_stats_rewrite.snap new file mode 100644 index 00000000..b6cb8b34 --- /dev/null +++ b/crates/squawk_parser/tests/snapshots/tests__regression_stats_rewrite.snap @@ -0,0 +1,5 @@ +--- +source: crates/squawk_parser/tests/tests.rs +input_file: crates/squawk_parser/tests/data/regression_suite/stats_rewrite.sql +--- + diff --git 
a/crates/squawk_parser/tests/snapshots/tests__select_casts_ok.snap b/crates/squawk_parser/tests/snapshots/tests__select_casts_ok.snap index d27928b2..dfee3fb4 100644 --- a/crates/squawk_parser/tests/snapshots/tests__select_casts_ok.snap +++ b/crates/squawk_parser/tests/snapshots/tests__select_casts_ok.snap @@ -340,6 +340,55 @@ SOURCE_FILE R_BRACK "]" SEMICOLON ";" WHITESPACE "\n\n" + COMMENT "-- setof" + WHITESPACE "\n" + SELECT + SELECT_CLAUSE + SELECT_KW "select" + WHITESPACE " " + TARGET_LIST + TARGET + CAST_EXPR + CAST_KW "cast" + L_PAREN "(" + LITERAL + STRING "'1'" + WHITESPACE " " + AS_KW "as" + WHITESPACE " " + PATH_TYPE + SETOF_KW "setof" + WHITESPACE " " + PATH + PATH_SEGMENT + NAME_REF + INT_KW "int" + R_PAREN ")" + SEMICOLON ";" + WHITESPACE "\n" + SELECT + SELECT_CLAUSE + SELECT_KW "select" + WHITESPACE " " + TARGET_LIST + TARGET + CAST_EXPR + LITERAL + STRING "'1'" + COLON_COLON + COLON ":" + COLON ":" + PATH_TYPE + PATH + PATH_SEGMENT + NAME_REF + SETOF_KW "setof" + WHITESPACE " " + AS_NAME + NAME + INT_KW "int" + SEMICOLON ";" + WHITESPACE "\n\n" COMMENT "-- based on postgres' gram.y" WHITESPACE "\n\n" COMMENT "-- Bit" diff --git a/crates/squawk_parser/tests/tests.rs b/crates/squawk_parser/tests/tests.rs index 6464bea4..5ffb3fda 100644 --- a/crates/squawk_parser/tests/tests.rs +++ b/crates/squawk_parser/tests/tests.rs @@ -93,9 +93,12 @@ fn regression_suite(fixture: Fixture<&str>) { let (_parsed, errors) = parse_text(content); + let expect_errors = test_name == "errors"; + let snapshot_name = format!("regression_{test_name}"); - let has_errors = errors.is_none(); + let no_errors = errors.is_none(); + let has_errors = !no_errors; with_settings!({ omit_expression => true, @@ -104,10 +107,17 @@ fn regression_suite(fixture: Fixture<&str>) { assert_snapshot!(snapshot_name, errors.unwrap_or_default()); }); - assert!( - has_errors, - "tests defined in the regression suite can't have parser errors." - ); + if expect_errors { + assert!( + has_errors, + "the errors.sql regression test must have errors" + ); + } else { + assert!( + no_errors, + "tests defined in the regression suite can't have parser errors." 
+        );
+    }
 }
 
 fn parse_text(text: &str) -> (String, Option<String>) {
diff --git a/crates/squawk_syntax/src/ast/generated/nodes.rs b/crates/squawk_syntax/src/ast/generated/nodes.rs
index f0354a27..072eef20 100644
--- a/crates/squawk_syntax/src/ast/generated/nodes.rs
+++ b/crates/squawk_syntax/src/ast/generated/nodes.rs
@@ -3030,6 +3030,36 @@ impl Copy {
     }
 }
 
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub struct CopyOption {
+    pub(crate) syntax: SyntaxNode,
+}
+impl CopyOption {
+    #[inline]
+    pub fn name(&self) -> Option<Name> {
+        support::child(&self.syntax)
+    }
+}
+
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub struct CopyOptionList {
+    pub(crate) syntax: SyntaxNode,
+}
+impl CopyOptionList {
+    #[inline]
+    pub fn copy_options(&self) -> AstChildren<CopyOption> {
+        support::children(&self.syntax)
+    }
+    #[inline]
+    pub fn l_paren_token(&self) -> Option<SyntaxToken> {
+        support::token(&self.syntax, SyntaxKind::L_PAREN)
+    }
+    #[inline]
+    pub fn r_paren_token(&self) -> Option<SyntaxToken> {
+        support::token(&self.syntax, SyntaxKind::R_PAREN)
+    }
+}
+
 #[derive(Debug, Clone, PartialEq, Eq, Hash)]
 pub struct CostFuncOption {
     pub(crate) syntax: SyntaxNode,
@@ -10275,6 +10305,21 @@ impl MergeInsert {
     }
 }
 
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub struct MergePartitions {
+    pub(crate) syntax: SyntaxNode,
+}
+impl MergePartitions {
+    #[inline]
+    pub fn merge_token(&self) -> Option<SyntaxToken> {
+        support::token(&self.syntax, SyntaxKind::MERGE_KW)
+    }
+    #[inline]
+    pub fn partitions_token(&self) -> Option<SyntaxToken> {
+        support::token(&self.syntax, SyntaxKind::PARTITIONS_KW)
+    }
+}
+
 #[derive(Debug, Clone, PartialEq, Eq, Hash)]
 pub struct MergeUpdate {
     pub(crate) syntax: SyntaxNode,
@@ -11707,6 +11752,25 @@ impl ParenSelect {
     }
 }
 
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub struct Partition {
+    pub(crate) syntax: SyntaxNode,
+}
+impl Partition {
+    #[inline]
+    pub fn partition_type(&self) -> Option<PartitionType> {
+        support::child(&self.syntax)
+    }
+    #[inline]
+    pub fn path(&self) -> Option<Path> {
+        support::child(&self.syntax)
+    }
+    #[inline]
+    pub fn partition_token(&self) -> Option<SyntaxToken> {
+        support::token(&self.syntax, SyntaxKind::PARTITION_KW)
+    }
+}
+
 #[derive(Debug, Clone, PartialEq, Eq, Hash)]
 pub struct PartitionBy {
     pub(crate) syntax: SyntaxNode,
@@ -11884,6 +11948,25 @@ impl PartitionItemList {
     }
 }
 
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub struct PartitionList {
+    pub(crate) syntax: SyntaxNode,
+}
+impl PartitionList {
+    #[inline]
+    pub fn partitions(&self) -> AstChildren<Partition> {
+        support::children(&self.syntax)
+    }
+    #[inline]
+    pub fn l_paren_token(&self) -> Option<SyntaxToken> {
+        support::token(&self.syntax, SyntaxKind::L_PAREN)
+    }
+    #[inline]
+    pub fn r_paren_token(&self) -> Option<SyntaxToken> {
+        support::token(&self.syntax, SyntaxKind::R_PAREN)
+    }
+}
+
 #[derive(Debug, Clone, PartialEq, Eq, Hash)]
 pub struct PartitionOf {
     pub(crate) syntax: SyntaxNode,
@@ -14646,6 +14729,25 @@ impl SourceFile {
     }
 }
 
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub struct SplitPartition {
+    pub(crate) syntax: SyntaxNode,
+}
+impl SplitPartition {
+    #[inline]
+    pub fn partition_list(&self) -> Option<PartitionList> {
+        support::child(&self.syntax)
+    }
+    #[inline]
+    pub fn partition_token(&self) -> Option<SyntaxToken> {
+        support::token(&self.syntax, SyntaxKind::PARTITION_KW)
+    }
+    #[inline]
+    pub fn split_token(&self) -> Option<SyntaxToken> {
+        support::token(&self.syntax, SyntaxKind::SPLIT_KW)
+    }
+}
+
 #[derive(Debug, Clone, PartialEq, Eq, Hash)]
 pub struct Storage {
     pub(crate) syntax: SyntaxNode,
@@ -16443,6 +16545,7 @@ pub enum AlterTableAction {
     EnableTrigger(EnableTrigger),
     ForceRls(ForceRls),
     InheritTable(InheritTable),
+    MergePartitions(MergePartitions),
     NoForceRls(NoForceRls),
     NoInheritTable(NoInheritTable),
     NotOf(NotOf),
@@ -16462,6 +16565,7 @@ pub enum AlterTableAction {
     SetUnlogged(SetUnlogged),
     SetWithoutCluster(SetWithoutCluster),
     SetWithoutOids(SetWithoutOids),
+    SplitPartition(SplitPartition),
     ValidateConstraint(ValidateConstraint),
 }
 
@@ -18837,6 +18941,42 @@ impl AstNode for Copy {
         &self.syntax
     }
 }
+impl AstNode for CopyOption {
+    #[inline]
+    fn can_cast(kind: SyntaxKind) -> bool {
+        kind == SyntaxKind::COPY_OPTION
+    }
+    #[inline]
+    fn cast(syntax: SyntaxNode) -> Option<Self> {
+        if Self::can_cast(syntax.kind()) {
+            Some(Self { syntax })
+        } else {
+            None
+        }
+    }
+    #[inline]
+    fn syntax(&self) -> &SyntaxNode {
+        &self.syntax
+    }
+}
+impl AstNode for CopyOptionList {
+    #[inline]
+    fn can_cast(kind: SyntaxKind) -> bool {
+        kind == SyntaxKind::COPY_OPTION_LIST
+    }
+    #[inline]
+    fn cast(syntax: SyntaxNode) -> Option<Self> {
+        if Self::can_cast(syntax.kind()) {
+            Some(Self { syntax })
+        } else {
+            None
+        }
+    }
+    #[inline]
+    fn syntax(&self) -> &SyntaxNode {
+        &self.syntax
+    }
+}
 impl AstNode for CostFuncOption {
     #[inline]
     fn can_cast(kind: SyntaxKind) -> bool {
@@ -23481,6 +23621,24 @@ impl AstNode for MergeInsert {
         &self.syntax
     }
 }
+impl AstNode for MergePartitions {
+    #[inline]
+    fn can_cast(kind: SyntaxKind) -> bool {
+        kind == SyntaxKind::MERGE_PARTITIONS
+    }
+    #[inline]
+    fn cast(syntax: SyntaxNode) -> Option<Self> {
+        if Self::can_cast(syntax.kind()) {
+            Some(Self { syntax })
+        } else {
+            None
+        }
+    }
+    #[inline]
+    fn syntax(&self) -> &SyntaxNode {
+        &self.syntax
+    }
+}
 impl AstNode for MergeUpdate {
     #[inline]
     fn can_cast(kind: SyntaxKind) -> bool {
@@ -24633,6 +24791,24 @@ impl AstNode for ParenSelect {
         &self.syntax
     }
 }
+impl AstNode for Partition {
+    #[inline]
+    fn can_cast(kind: SyntaxKind) -> bool {
+        kind == SyntaxKind::PARTITION
+    }
+    #[inline]
+    fn cast(syntax: SyntaxNode) -> Option<Self> {
+        if Self::can_cast(syntax.kind()) {
+            Some(Self { syntax })
+        } else {
+            None
+        }
+    }
+    #[inline]
+    fn syntax(&self) -> &SyntaxNode {
+        &self.syntax
+    }
+}
 impl AstNode for PartitionBy {
     #[inline]
     fn can_cast(kind: SyntaxKind) -> bool {
@@ -24759,6 +24935,24 @@ impl AstNode for PartitionItemList {
         &self.syntax
     }
 }
+impl AstNode for PartitionList {
+    #[inline]
+    fn can_cast(kind: SyntaxKind) -> bool {
+        kind == SyntaxKind::PARTITION_LIST
+    }
+    #[inline]
+    fn cast(syntax: SyntaxNode) -> Option<Self> {
+        if Self::can_cast(syntax.kind()) {
+            Some(Self { syntax })
+        } else {
+            None
+        }
+    }
+    #[inline]
+    fn syntax(&self) -> &SyntaxNode {
+        &self.syntax
+    }
+}
 impl AstNode for PartitionOf {
     #[inline]
     fn can_cast(kind: SyntaxKind) -> bool {
@@ -26811,6 +27005,24 @@ impl AstNode for SourceFile {
         &self.syntax
     }
 }
+impl AstNode for SplitPartition {
+    #[inline]
+    fn can_cast(kind: SyntaxKind) -> bool {
+        kind == SyntaxKind::SPLIT_PARTITION
+    }
+    #[inline]
+    fn cast(syntax: SyntaxNode) -> Option<Self> {
+        if Self::can_cast(syntax.kind()) {
+            Some(Self { syntax })
+        } else {
+            None
+        }
+    }
+    #[inline]
+    fn syntax(&self) -> &SyntaxNode {
+        &self.syntax
+    }
+}
 impl AstNode for Storage {
     #[inline]
     fn can_cast(kind: SyntaxKind) -> bool {
@@ -28654,6 +28866,7 @@ impl AstNode for AlterTableAction {
             | SyntaxKind::ENABLE_TRIGGER
             | SyntaxKind::FORCE_RLS
             | SyntaxKind::INHERIT_TABLE
+            | SyntaxKind::MERGE_PARTITIONS
             | SyntaxKind::NO_FORCE_RLS
             | SyntaxKind::NO_INHERIT_TABLE
             | SyntaxKind::NOT_OF
@@ -28673,6 +28886,7 @@ impl AstNode for AlterTableAction {
             | SyntaxKind::SET_UNLOGGED
             | SyntaxKind::SET_WITHOUT_CLUSTER
             | SyntaxKind::SET_WITHOUT_OIDS
+            | SyntaxKind::SPLIT_PARTITION
             | SyntaxKind::VALIDATE_CONSTRAINT
         )
     }
@@ -28718,6 +28932,9 @@ impl AstNode for AlterTableAction {
             SyntaxKind::ENABLE_TRIGGER => AlterTableAction::EnableTrigger(EnableTrigger { syntax }),
             SyntaxKind::FORCE_RLS => AlterTableAction::ForceRls(ForceRls { syntax }),
             SyntaxKind::INHERIT_TABLE => AlterTableAction::InheritTable(InheritTable { syntax }),
+            SyntaxKind::MERGE_PARTITIONS => {
+                AlterTableAction::MergePartitions(MergePartitions { syntax })
+            }
             SyntaxKind::NO_FORCE_RLS => AlterTableAction::NoForceRls(NoForceRls { syntax }),
             SyntaxKind::NO_INHERIT_TABLE => {
                 AlterTableAction::NoInheritTable(NoInheritTable { syntax })
@@ -28751,6 +28968,9 @@ impl AstNode for AlterTableAction {
             SyntaxKind::SET_WITHOUT_OIDS => {
                 AlterTableAction::SetWithoutOids(SetWithoutOids { syntax })
             }
+            SyntaxKind::SPLIT_PARTITION => {
+                AlterTableAction::SplitPartition(SplitPartition { syntax })
+            }
             SyntaxKind::VALIDATE_CONSTRAINT => {
                 AlterTableAction::ValidateConstraint(ValidateConstraint { syntax })
             }
@@ -28784,6 +29004,7 @@ impl AstNode for AlterTableAction {
             AlterTableAction::EnableTrigger(it) => &it.syntax,
             AlterTableAction::ForceRls(it) => &it.syntax,
             AlterTableAction::InheritTable(it) => &it.syntax,
+            AlterTableAction::MergePartitions(it) => &it.syntax,
             AlterTableAction::NoForceRls(it) => &it.syntax,
             AlterTableAction::NoInheritTable(it) => &it.syntax,
             AlterTableAction::NotOf(it) => &it.syntax,
@@ -28803,6 +29024,7 @@ impl AstNode for AlterTableAction {
             AlterTableAction::SetUnlogged(it) => &it.syntax,
             AlterTableAction::SetWithoutCluster(it) => &it.syntax,
             AlterTableAction::SetWithoutOids(it) => &it.syntax,
+            AlterTableAction::SplitPartition(it) => &it.syntax,
             AlterTableAction::ValidateConstraint(it) => &it.syntax,
         }
     }
@@ -28933,6 +29155,12 @@ impl From<InheritTable> for AlterTableAction {
         AlterTableAction::InheritTable(node)
     }
 }
+impl From<MergePartitions> for AlterTableAction {
+    #[inline]
+    fn from(node: MergePartitions) -> AlterTableAction {
+        AlterTableAction::MergePartitions(node)
+    }
+}
 impl From<NoForceRls> for AlterTableAction {
     #[inline]
     fn from(node: NoForceRls) -> AlterTableAction {
@@ -29047,6 +29275,12 @@ impl From<SetWithoutOids> for AlterTableAction {
         AlterTableAction::SetWithoutOids(node)
     }
 }
+impl From<SplitPartition> for AlterTableAction {
+    #[inline]
+    fn from(node: SplitPartition) -> AlterTableAction {
+        AlterTableAction::SplitPartition(node)
+    }
+}
 impl From<ValidateConstraint> for AlterTableAction {
     #[inline]
     fn from(node: ValidateConstraint) -> AlterTableAction {
diff --git a/crates/squawk_syntax/src/postgresql.ungram b/crates/squawk_syntax/src/postgresql.ungram
index 40e00537..a025503e 100644
--- a/crates/squawk_syntax/src/postgresql.ungram
+++ b/crates/squawk_syntax/src/postgresql.ungram
@@ -749,6 +749,8 @@ AlterTableAction =
 | ClusterOn
 | OwnerTo
 | DetachPartition
+| MergePartitions
+| SplitPartition
 | DropConstraint
 | DropColumn
 | AddConstraint
@@ -2643,6 +2645,12 @@ Copy =
   'with'?
   WhereClause?
 
+CopyOptionList =
+  '(' (CopyOption (',' CopyOption)*) ')'
+
+CopyOption =
+  Name
+
 Call =
   'call'
   Path
   ArgList
@@ -2866,6 +2874,24 @@ ClusterOn =
 DetachPartition =
   'detach' 'partition'
 
+MergePartitions =
+  'merge' 'partitions'
+  '(' ')'
+  'into'
+  Path
+
+SplitPartition =
+  'split' 'partition' 'into'
+  PartitionList
+
+PartitionList =
+  '(' (Partition (',' Partition)*) ')'
+
+Partition =
+  'partition'
+  Path
+  PartitionType
+
 DropColumn =
   'drop' 'column'? IfExists? NameRef
   ('restrict' | 'cascade')?
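For reference, a sketch of the ALTER TABLE statements the new MergePartitions and SplitPartition rules are meant to accept, modeled on the upstream partition_merge.sql and partition_split.sql suites whose snapshots this patch adds (the table names, partition names, and bounds below are illustrative, not taken from the patch):

-- fold several partitions into one
ALTER TABLE sales_range
  MERGE PARTITIONS (sales_feb2022, sales_mar2022) INTO sales_feb_mar2022;

-- split one partition back out into several
ALTER TABLE sales_range
  SPLIT PARTITION sales_feb_mar2022 INTO
    (PARTITION sales_feb2022 FOR VALUES FROM ('2022-02-01') TO ('2022-03-01'),
     PARTITION sales_mar2022 FOR VALUES FROM ('2022-03-01') TO ('2022-04-01'));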
diff --git a/crates/xtask/src/download_regression_tests.rs b/crates/xtask/src/download_regression_tests.rs index 33ff0758..cbbddd32 100644 --- a/crates/xtask/src/download_regression_tests.rs +++ b/crates/xtask/src/download_regression_tests.rs @@ -1,17 +1,121 @@ +use crate::path::project_root; use anyhow::{Result, bail}; use camino::Utf8PathBuf; use regex::Regex; use std::fs::{File, create_dir_all, remove_dir_all}; -use std::io::{BufRead, Cursor, Write}; +use std::io::{BufRead, Write}; use std::process::Command; -const OUTPUT_DIR: &str = "crates/squawk_parser/tests/data/regression_suite"; +const PROCESSED_OUTPUT_DIR: &str = "crates/squawk_parser/tests/data/regression_suite"; + +const START_END_MARKERS: &[(&str, &str)] = &[ + ( + "MERGE INTO target t RANDOMWORD", + "\tUPDATE SET balance = 0;", + ), + ( + "-- incorrectly specifying INTO target", + "\tINSERT INTO target DEFAULT VALUES;", + ), + ("-- Multiple VALUES clause", "\tINSERT VALUES (1,1), (2,2);"), + ("-- SELECT query for INSERT", "\tINSERT SELECT (1, 1);"), + ("-- UPDATE tablename", "\tUPDATE target SET balance = 0;"), +]; + +const IGNORED_LINES: &[&str] = &[ + r#"SELECT rank() OVER (PARTITION BY four, ORDER BY ten) FROM tenk1;"#, + r#"SELECT q.* FROM (SELECT * FROM test_tablesample) as q TABLESAMPLE BERNOULLI (5);"#, + r#"CREATE SEQUENCE tableam_seq_heap2 USING heap2;"#, + "CREATE VIEW tableam_view_heap2 USING heap2 AS SELECT * FROM tableam_tbl_heap2;", + "SELECT INTO tableam_tblselectinto_heap2 USING heap2 FROM tableam_tbl_heap2;", + "INSERT INTO foo DEFAULT VALUES RETURNING WITH (nonsuch AS something) *;", + "SELECT 0.0e;", + "SELECT 0.0e+a;", + "SELECT 0b;", + "SELECT 0o;", + "SELECT 0x;", + "SELECT _1_000.5;", + "EXPLAIN (COSTS OFF) :qry;", + ":qry;", + "create table foo (with baz);", + "create table foo (with ordinality);", + ":show_data;", + "alter trigger a on only grandparent rename to b; -- ONLY not supported", + "CREATE SUBSCRIPTION regress_testsub CONNECTION 'foo';", + "CREATE SUBSCRIPTION regress_testsub PUBLICATION foo;", + "SELECT U&'wrong: +0061' UESCAPE +;", + "CREATE STATISTICS tst;", + "CREATE STATISTICS tst ON a, b;", + "CREATE STATISTICS tst ON a FROM (VALUES (x)) AS foo;", + "CREATE STATISTICS tst ON a FROM foo NATURAL JOIN bar;", + "CREATE STATISTICS tst ON a FROM (SELECT * FROM ext_stats_test) AS foo;", + "CREATE STATISTICS tst ON a FROM ext_stats_test s TABLESAMPLE system (x);", + "CREATE STATISTICS tst ON a FROM XMLTABLE('foo' PASSING 'bar' COLUMNS a text);", + "CREATE STATISTICS tst ON a FROM JSON_TABLE(jsonb '123', '$' COLUMNS (item int));", + "CREATE STATISTICS alt_stat2 ON a FROM tftest(1);", + "ALTER STATISTICS IF EXISTS ab1_a_b_stats SET STATISTICS 0;", + "CHECKPOINT (WRONG);", + "CHECKPOINT (MODE WRONG);", + "CHECKPOINT (MODE FAST, FLUSH_UNLOGGED FALSE);", + "CHECKPOINT (FLUSH_UNLOGGED);", + "ALTER PUBLICATION testpub1_forschema ADD TABLES IN SCHEMA foo (a, b);", + "CREATE SCHEMA IF NOT EXISTS test_ns_schema_renamed -- fail, disallowed", + "insert into insertconflicttest values (1) on conflict (key int4_ops (fillfactor=10)) do nothing;", + "insert into insertconflicttest values (1) on conflict (key asc) do nothing;", + "insert into insertconflicttest values (1) on conflict (key nulls last) do nothing;", + "ALTER USER MAPPING FOR user SERVER ss4 OPTIONS (gotcha 'true'); -- ERROR", + "ALTER FOREIGN DATA WRAPPER foo; -- ERROR", + "ALTER SERVER s0; -- ERROR", + "ALTER USER MAPPING FOR user SERVER ss4 OPTIONS (gotcha 'true'); -- ERROR", + "alter table atacc1 SET WITH OIDS;", + "alter table atacc1 drop 
xmin;", + "create view myview as select * from atacc1;", + "CREATE INDEX IF NOT EXISTS ON onek USING btree(unique1 int4_ops);", + "SELECT 10 !=-;", + "CREATE TABLE withoid() WITH OIDS;", + "update dposintatable set (f1[2])[1] = array[98];", + "CREATE FOREIGN TABLE ft1 (); -- ERROR", + r#"select 'a\\bcd' as f1, 'a\\b\'cd' as f2, 'a\\b\'''cd' as f3, 'abcd\\' as f4, 'ab\\\'cd' as f5, '\\\\' as f6;"#, + r#"select 'a\\bcd' as f1, 'a\\b\'cd' as f2, 'a\\b\'''cd' as f3, 'abcd\\' as f4, 'ab\\\'cd' as f5, '\\\\' as f6;"#, + "copy (select * from test1) (t,id) to stdout;", +]; + +const VARIABLE_REPLACEMENTS: &[(&str, &str)] = &[ + (":reltoastname", "reltoastname"), + (":temp_schema_name", "temp_schema_name"), + (":toastrel", "toastrel"), + (":newloid", "10101"), + (r#" :""#, r#" ""#), +]; + +const GSET_REPLACEMENTS: &[(&str, &str)] = &[ + ( + "\\gset my_io_sum_shared_before_", + "/* \\gset my_io_sum_shared_before_ */;", + ), + ( + "\\gset io_sum_shared_before_", + "/* \\gset io_sum_shared_before_ */;", + ), + ( + "\\gset io_sum_wal_normal_before_", + "/* \\gset io_sum_wal_normal_before_ */;", + ), +]; pub(crate) fn download_regression_tests() -> Result<()> { - let target_dir = Utf8PathBuf::from(OUTPUT_DIR); + let temp_dir = download_regression_suite()?; + transform_regression_suite(&temp_dir)?; + Ok(()) +} + +fn download_regression_suite() -> Result { + let target_dir = Utf8PathBuf::try_from(std::env::temp_dir()) + .map_err(|_| anyhow::anyhow!("temp dir path is not valid UTF-8"))? + .join("squawk_raw_regression_suite"); if target_dir.exists() { - println!("Cleaning target directory: {target_dir:?}"); + println!("Cleaning temp directory: {target_dir:?}"); remove_dir_all(&target_dir)?; } @@ -23,7 +127,6 @@ pub(crate) fn download_regression_tests() -> Result<()> { for (index, url) in urls.iter().enumerate() { let filename = url.split('/').next_back().unwrap(); if filename.contains("psql") { - // skipping this for now, we don't support psql continue; } let filepath = target_dir.join(filename); @@ -39,31 +142,60 @@ pub(crate) fn download_regression_tests() -> Result<()> { if !output.status.success() { let error_msg = String::from_utf8_lossy(&output.stderr); - bail!(anyhow::anyhow!( - "Failed to download '{}': {}", - url, - error_msg - )); + bail!("Failed to download '{}': {}", url, error_msg); } - let mut processed_content = Vec::new(); + File::create(&filepath)?.write_all(&output.stdout)?; + } + + Ok(target_dir) +} - let cursor = Cursor::new(&output.stdout); +fn transform_regression_suite(input_dir: &Utf8PathBuf) -> Result<()> { + let output_dir = project_root().join(PROCESSED_OUTPUT_DIR); - if let Err(e) = preprocess_sql(cursor, &mut processed_content) { + if output_dir.exists() { + println!("Cleaning target directory: {output_dir:?}"); + remove_dir_all(&output_dir)?; + } + + create_dir_all(&output_dir)?; + + let mut files: Vec = vec![]; + for entry in std::fs::read_dir(input_dir)? 
+        let entry = entry?;
+        let path = Utf8PathBuf::try_from(entry.path())?;
+        if path.extension() == Some("sql") {
+            files.push(path);
+        }
+    }
+
+    files.sort();
+    let total_files = files.len();
+
+    for (index, input_path) in files.iter().enumerate() {
+        let filename = input_path.file_name().unwrap();
+        let output_path = output_dir.join(filename);
+
+        println!("[{}/{}] Processing {}...", index + 1, total_files, filename);
+
+        let input_file = File::open(input_path)?;
+        let reader = std::io::BufReader::new(input_file);
+        let mut processed_content = vec![];
+
+        if let Err(e) = preprocess_sql(reader, &mut processed_content) {
             eprintln!("Error: Failed to process file: {e}");
             continue;
         }
 
-        let mut dest = File::create(&filepath)?;
-        dest.write_all(&processed_content)?
+        let mut dest = File::create(&output_path)?;
+        dest.write_all(&processed_content)?;
     }
 
     Ok(())
 }
 
 fn fetch_download_urls() -> Result<Vec<String>> {
-    // Fetch list of SQL file URLs
     println!("Fetching SQL file URLs...");
     let output = Command::new("gh")
         .args([
@@ -75,152 +207,221 @@ fn fetch_download_urls() -> Result<Vec<String>> {
         .output()?;
 
     if !output.status.success() {
-        bail!(anyhow::anyhow!(
+        bail!(
             "Failed to fetch SQL files: {}",
             String::from_utf8_lossy(&output.stderr)
-        ));
+        );
     }
 
     let json_str = String::from_utf8(output.stdout)?;
     let files: Vec<serde_json::Value> = serde_json::from_str(&json_str)?;
 
-    // Extract download URLs for SQL files
     let urls: Vec<String> = files
         .into_iter()
         .filter(|file| {
             file["name"]
                 .as_str()
-                .map(|name| name.ends_with(".sql"))
-                .unwrap_or(false)
+                .is_some_and(|name| name.ends_with(".sql"))
         })
         .filter_map(|file| file["download_url"].as_str().map(String::from))
         .collect();
 
     if urls.is_empty() {
-        bail!(anyhow::anyhow!("No SQL files found"));
+        bail!("No SQL files found");
     }
 
     Ok(urls)
 }
 
-fn preprocess_sql<R: BufRead, W: Write>(source: R, mut dest: W) -> Result<()> {
-    let mut skipping_copy_block = false;
-
+// The regression suite from postgres has a mix of valid and invalid sql. We
+// don't have a good way to determine what is what, so we munge the data to
+// comment out any problematic code.
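Before the function itself, a quick sense of the transformation end to end. A minimal sketch (the demo function and its input are invented, not part of this diff), driving preprocess_sql the same way the tests at the bottom of the file do, with a Cursor standing in for a downloaded file:

    use std::io::Cursor;

    fn demo() -> anyhow::Result<String> {
        // A psql meta-command plus a template variable, as found in the raw suite.
        let raw = "\\d+ tenk1\nSELECT * FROM tenk1 WHERE unique1 = :x;\n";
        let mut out = Vec::new();
        preprocess_sql(Cursor::new(raw), &mut out)?;
        // The meta-command comes back commented out, and :x becomes 'x':
        //   -- \d+ tenk1
        //   SELECT * FROM tenk1 WHERE unique1 = 'x';
        Ok(String::from_utf8(out)?)
    }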
+pub(crate) fn preprocess_sql<R: BufRead, W: Write>(source: R, mut dest: W) -> Result<()> {
     let template_vars_regex = Regex::new(r"^:'([^']+)'|^:([a-zA-Z_][a-zA-Z0-9_]*)").unwrap();
+    let mut in_copy_stdin = false;
+    let mut in_bogus_cases = false;
+    let mut in_copy_select_input = false;
+    let mut looking_for_end: Option<&str> = None;
 
-    for (idx, line) in source.lines().enumerate() {
+    for line in source.lines() {
         let mut line = line?;
+        let mut should_comment = false;
 
-        // Detect the start of the COPY block
-        if line.starts_with("COPY ") && line.to_lowercase().contains("from stdin") {
-            skipping_copy_block = true;
-            continue;
+        if line.contains("bogus cases") {
+            in_bogus_cases = true;
+        } else if line.is_empty() {
+            in_bogus_cases = false;
         }
 
-        // Detect the end of the COPY block
-        if skipping_copy_block && (line.starts_with("\\.") || line.is_empty()) {
-            skipping_copy_block = false;
-            continue;
+        if line.contains("copy test3 from stdin\\;") {
+            in_copy_select_input = true;
+        } else if line.contains("select * from test3") {
+            in_copy_select_input = false;
         }
 
-        // Skip lines if inside a COPY block
-        if skipping_copy_block {
-            continue;
+        for &(start, end) in START_END_MARKERS {
+            if line.contains(start) {
+                looking_for_end = Some(end);
+            }
         }
 
-        if line.starts_with("\\") {
-            // Skip plpgsql commands (for now)
-            continue;
+        if let Some(end) = looking_for_end {
+            should_comment = true;
+            if line.contains(end) {
+                looking_for_end = None;
+            }
+        }
+
+        let line_lower = line.to_lowercase();
+        if (line_lower.starts_with("copy ") || line_lower.starts_with("\\copy"))
+            && (line_lower.contains("from stdin") || line_lower.contains("from stdout"))
+        {
+            in_copy_stdin = true;
+            if line.starts_with("\\copy") {
+                should_comment = true;
+            }
+        } else if in_copy_stdin {
+            if line == "\\."
+                || line.starts_with("--")
+                || ["copy", "begin", "rollback", "select"]
+                    .iter()
+                    .any(|prefix| line_lower.starts_with(prefix))
+            {
+                in_copy_stdin = false;
+            }
+            should_comment = true;
+        } else if (line.trim_start().starts_with('\\') && !line.contains("\\gset"))
+            || line.starts_with("'show_data'")
+            || line.starts_with(':')
+        {
+            should_comment = true;
+        }
+
+        if in_bogus_cases || in_copy_select_input {
+            should_comment = true;
+        }
+
+        if IGNORED_LINES.iter().any(|&prefix| line.starts_with(prefix)) {
+            should_comment = true;
+        }
+
+        if line.contains("\\;") || line.starts_with("**") {
+            should_comment = true;
+        }
+
+        if should_comment {
+            line = format!("-- {line}");
+        }
+
+        for &(from, to) in GSET_REPLACEMENTS {
+            line = line.replace(from, to);
+        }
+
+        line = line.replace(
+            "FROM generate_series(1, 1100) g(i)",
+            "FROM generate_series(1, 1100) g(i);",
+        );
+
+        for &(from, to) in VARIABLE_REPLACEMENTS {
+            line = line.replace(from, to);
         }
 
-        // replace "\gset" with ";"
         if line.contains("\\gset") {
-            line = line.replace("\\gset", ";");
+            if let Some(start) = line.find("\\gset") {
+                let end = line[start..]
+                    .find('\n')
+                    .map(|i| start + i)
+                    .unwrap_or(line.len());
+                let gset_cmd = line[start..end].trim_end();
+                line = format!("{}/* {} */;{}", &line[..start], gset_cmd, &line[end..]);
+            }
         }
 
-        // Replace template variables
-        let mut result = String::new();
-        let mut i = 0;
-        let bytes = line.as_bytes();
-        let mut in_single_quote = false;
-        let mut in_double_quote = false;
-        let mut in_array = false;
-
-        while i < bytes.len() {
-            let c = bytes[i] as char;
-
-            // Handle quote state transitions
-            match c {
-                '\'' => {
-                    result.push(c);
-                    i += 1;
-                    in_single_quote = !in_single_quote;
-                    continue;
-                }
-                '"' => {
-                    result.push(c);
-                    i += 1;
-                    in_double_quote = !in_double_quote;
-                    continue;
-                }
-                '[' => {
-                    result.push(c);
-                    i += 1;
-                    in_array = true;
-                    continue;
-                }
-                ']' => {
-                    result.push(c);
-                    i += 1;
-                    in_array = false;
-                    continue;
-                }
-                ':' if !in_single_quote && !in_double_quote && !in_array => {
-                    // Skip type casts (e.g., ::text)
-                    if i + 1 < bytes.len() && bytes[i + 1] as char == ':' {
+        if line.trim_start().starts_with("--") {
+            writeln!(dest, "{line}")?;
+            continue;
+        }
+
+        let processed = replace_template_vars(&line, &template_vars_regex)?;
+        writeln!(dest, "{processed}")?;
+    }
+
+    Ok(())
+}
+
+fn replace_template_vars(line: &str, template_vars_regex: &Regex) -> Result<String> {
+    let mut result = String::new();
+    let mut char_indices = line.char_indices().peekable();
+    let mut in_single_quote = false;
+    let mut in_double_quote = false;
+    let mut in_array = false;
+
+    while let Some((byte_pos, c)) = char_indices.next() {
+        match c {
+            '\'' => {
+                result.push(c);
+                in_single_quote = !in_single_quote;
+            }
+            '"' => {
+                result.push(c);
+                in_double_quote = !in_double_quote;
+            }
+            '[' => {
+                result.push(c);
+                in_array = true;
+            }
+            ']' => {
+                result.push(c);
+                in_array = false;
+            }
+            ':' if !in_single_quote && !in_double_quote && !in_array => {
+                if let Some(&(_, next_c)) = char_indices.peek() {
+                    if next_c == ':' {
                         result.push_str("::");
-                        i += 2;
+                        char_indices.next();
                         continue;
                     }
-
-                    if i + 2 < bytes.len() && bytes[i + 1] as char == '=' {
+                    if next_c == '=' {
                         result.push_str(":=");
-                        i += 2;
+                        char_indices.next();
                         continue;
                     }
+                }
 
-                    let remaining = &line[i..];
-                    if let Some(caps) = template_vars_regex.captures(remaining) {
-                        let full = caps.get(0).unwrap();
-                        let m = caps.get(1).or_else(|| caps.get(2)).unwrap();
-                        let matched_var = &remaining[m.start()..m.end()];
-
-                        println!("#{idx} Replacing template variable {matched_var}");
-
-                        result.push('\'');
-                        result.push_str(matched_var);
-                        result.push('\'');
-
-                        i += full.end();
-                        continue;
+                let remaining = &line[byte_pos..];
+                if let Some(caps) = template_vars_regex.captures(remaining) {
+                    let full = caps.get(0).unwrap();
+                    let m = caps.get(1).or_else(|| caps.get(2)).unwrap();
+                    let matched_var = &remaining[m.start()..m.end()];
+
+                    result.push('\'');
+                    result.push_str(matched_var);
+                    result.push('\'');
+
+                    let skip_bytes = full.end() - c.len_utf8();
+                    let mut skipped = 0;
+                    while skipped < skip_bytes {
+                        if let Some((_, ch)) = char_indices.next() {
+                            skipped += ch.len_utf8();
+                        } else {
+                            break;
+                        }
                     }
+                    continue;
                 }
-                _ => {}
+                result.push(c);
             }
-
-            result.push(c);
-            i += 1;
+            _ => result.push(c),
         }
-
-        // Write the cleaned line
-        writeln!(dest, "{result}")?;
     }
 
-    Ok(())
+    Ok(result)
 }
 
 #[cfg(test)]
 mod tests {
+    use std::io::Cursor;
+
     use super::*;
 
     fn test_preprocess_sql(sql: &str) -> Result<String> {
@@ -255,6 +456,26 @@ mod tests {
             "SELECT JSON_OBJECT('foo': NULL::int FORMAT JSON);",
             "SELECT JSON_OBJECT('foo': NULL::int FORMAT JSON);",
         ),
+        (
+            r#"ALTER DATABASE :"datname" REFRESH COLLATION VERSION;"#,
+            r#"ALTER DATABASE "datname" REFRESH COLLATION VERSION;"#,
+        ),
+        (
+            "-- comment with :placeholder should not be replaced",
+            "-- comment with :placeholder should not be replaced",
+        ),
+        (
+            " -- indented comment with :foo",
+            " -- indented comment with :foo",
+        ),
+        (
+            "SELECT 'ὀδυσσεύς' = 'ὈΔΥΣΣΕΎΣ' COLLATE case_sensitive;",
+            "SELECT 'ὀδυσσεύς' = 'ὈΔΥΣΣΕΎΣ' COLLATE case_sensitive;",
+        ),
+        (
+            "SELECT 'ὀδυσσεύς' WHERE name = :greek_name;",
+            "SELECT 'ὀδυσσεύς' WHERE name = 'greek_name';",
+        ),
     ];
 
     for (input, expected) in &cases {
diff --git a/postgres/kwlist.h b/postgres/kwlist.h
index f0decd44..8b780fa7 100644
--- a/postgres/kwlist.h
+++ b/postgres/kwlist.h
@@ -1,7 +1,7 @@
 // synced from:
-// commit: b0fb2c6aa5a485e28210e13ae5536c1231b1261f
-// committed at: 2025-09-27T21:17:51Z
-// file: https://github.com/postgres/postgres/blob/b0fb2c6aa5a485e28210e13ae5536c1231b1261f/src/include/parser/kwlist.h
+// commit: e5f3839af685c303d8ebcc1ea0d407c124372931
+// committed at: 2025-12-22T22:41:34Z
+// file: https://github.com/postgres/postgres/blob/e5f3839af685c303d8ebcc1ea0d407c124372931/src/include/parser/kwlist.h
 //
 // update via:
 // cargo xtask sync-kwlist
@@ -210,6 +210,7 @@ PG_KEYWORD("hold", HOLD, UNRESERVED_KEYWORD, BARE_LABEL)
 PG_KEYWORD("hour", HOUR_P, UNRESERVED_KEYWORD, AS_LABEL)
 PG_KEYWORD("identity", IDENTITY_P, UNRESERVED_KEYWORD, BARE_LABEL)
 PG_KEYWORD("if", IF_P, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("ignore", IGNORE_P, UNRESERVED_KEYWORD, AS_LABEL)
 PG_KEYWORD("ilike", ILIKE, TYPE_FUNC_NAME_KEYWORD, BARE_LABEL)
 PG_KEYWORD("immediate", IMMEDIATE, UNRESERVED_KEYWORD, BARE_LABEL)
 PG_KEYWORD("immutable", IMMUTABLE, UNRESERVED_KEYWORD, BARE_LABEL)
@@ -277,6 +278,7 @@ PG_KEYWORD("location", LOCATION, UNRESERVED_KEYWORD, BARE_LABEL)
 PG_KEYWORD("lock", LOCK_P, UNRESERVED_KEYWORD, BARE_LABEL)
 PG_KEYWORD("locked", LOCKED, UNRESERVED_KEYWORD, BARE_LABEL)
 PG_KEYWORD("logged", LOGGED, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("lsn", LSN_P, UNRESERVED_KEYWORD, BARE_LABEL)
 PG_KEYWORD("mapping", MAPPING, UNRESERVED_KEYWORD, BARE_LABEL)
 PG_KEYWORD("match", MATCH, UNRESERVED_KEYWORD, BARE_LABEL)
 PG_KEYWORD("matched", MATCHED, UNRESERVED_KEYWORD, BARE_LABEL)
@@ -345,6 +347,7 @@ PG_KEYWORD("parameter", PARAMETER, UNRESERVED_KEYWORD, BARE_LABEL)
 PG_KEYWORD("parser", PARSER, UNRESERVED_KEYWORD, BARE_LABEL)
 PG_KEYWORD("partial", PARTIAL, UNRESERVED_KEYWORD, BARE_LABEL)
 PG_KEYWORD("partition", PARTITION, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("partitions", PARTITIONS, UNRESERVED_KEYWORD, BARE_LABEL)
 PG_KEYWORD("passing", PASSING, UNRESERVED_KEYWORD, BARE_LABEL)
 PG_KEYWORD("password", PASSWORD, UNRESERVED_KEYWORD, BARE_LABEL)
 PG_KEYWORD("path", PATH, UNRESERVED_KEYWORD, BARE_LABEL)
@@ -386,6 +389,7 @@ PG_KEYWORD("repeatable", REPEATABLE, UNRESERVED_KEYWORD, BARE_LABEL)
 PG_KEYWORD("replace", REPLACE, UNRESERVED_KEYWORD, BARE_LABEL)
 PG_KEYWORD("replica", REPLICA, UNRESERVED_KEYWORD, BARE_LABEL)
 PG_KEYWORD("reset", RESET, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("respect", RESPECT_P, UNRESERVED_KEYWORD, AS_LABEL)
 PG_KEYWORD("restart", RESTART, UNRESERVED_KEYWORD, BARE_LABEL)
 PG_KEYWORD("restrict", RESTRICT, UNRESERVED_KEYWORD, BARE_LABEL)
 PG_KEYWORD("return", RETURN, UNRESERVED_KEYWORD, BARE_LABEL)
@@ -428,6 +432,7 @@ PG_KEYWORD("smallint", SMALLINT, COL_NAME_KEYWORD, BARE_LABEL)
 PG_KEYWORD("snapshot", SNAPSHOT, UNRESERVED_KEYWORD, BARE_LABEL)
 PG_KEYWORD("some", SOME, RESERVED_KEYWORD, BARE_LABEL)
 PG_KEYWORD("source", SOURCE, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("split", SPLIT, UNRESERVED_KEYWORD, BARE_LABEL)
 PG_KEYWORD("sql", SQL_P, UNRESERVED_KEYWORD, BARE_LABEL)
 PG_KEYWORD("stable", STABLE, UNRESERVED_KEYWORD, BARE_LABEL)
 PG_KEYWORD("standalone", STANDALONE_P, UNRESERVED_KEYWORD, BARE_LABEL)
@@ -502,6 +507,7 @@ PG_KEYWORD("view", VIEW, UNRESERVED_KEYWORD, BARE_LABEL)
 PG_KEYWORD("views", VIEWS, UNRESERVED_KEYWORD, BARE_LABEL)
 PG_KEYWORD("virtual", VIRTUAL, UNRESERVED_KEYWORD, BARE_LABEL)
 PG_KEYWORD("volatile", VOLATILE, UNRESERVED_KEYWORD, BARE_LABEL)
+PG_KEYWORD("wait", WAIT, UNRESERVED_KEYWORD, BARE_LABEL)
 PG_KEYWORD("when", WHEN, RESERVED_KEYWORD, BARE_LABEL)
 PG_KEYWORD("where", WHERE, RESERVED_KEYWORD, AS_LABEL)
 PG_KEYWORD("whitespace", WHITESPACE_P, UNRESERVED_KEYWORD, BARE_LABEL)
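One consequence of the kwlist sync worth keeping in mind: all six new keywords (ignore, lsn, partitions, respect, split, wait) arrive as UNRESERVED_KEYWORD, so besides their keyword roles they must keep parsing as ordinary identifiers. A couple of illustrative smoke inputs (invented, not taken from the regression suite):

    // Unreserved keywords remain legal table and column names in PostgreSQL.
    const NEW_KEYWORDS_AS_IDENTIFIERS: &[&str] = &[
        "CREATE TABLE split (wait int, lsn text);",
        "SELECT partitions FROM respect;",
    ];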