#!/usr/bin/env bash
#
# Test suite for the pg_scribe --new-chain command.
#
# This test suite:
#   - Creates temporary test databases
#   - Tests various --new-chain scenarios
#   - Verifies expected outcomes
#   - Cleans up all resources
#
# Requirements: a reachable PostgreSQL server (connectable as $PGUSER)
# configured with wal_level = logical.

set -euo pipefail

# Colors for test output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[0;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Test configuration
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
PG_SCRIBE="$SCRIPT_DIR/scripts/pg_scribe"
TEST_DIR="/tmp/pg_scribe_test_$$"
TEST_DB_PREFIX="pg_scribe_test_$$"
PGUSER="${PGUSER:-postgres}"

# Test counters
TESTS_RUN=0
TESTS_PASSED=0
TESTS_FAILED=0

# Cleanup tracking
DATABASES_TO_CLEANUP=()

#
# Logging functions
#
# NOTE: counters are incremented with VAR=$((VAR + 1)) rather than
# ((VAR++)): the arithmetic command ((expr)) exits non-zero when the
# expression evaluates to 0, so ((VAR++)) fails on the first increment
# (pre-increment value 0), which both trips 'set -e' in non-suppressed
# contexts and makes these logging functions return failure.

log_test() {
  echo -e "${BLUE}TEST:${NC} $*"
}

log_pass() {
  echo -e "${GREEN}PASS:${NC} $*"
  TESTS_PASSED=$((TESTS_PASSED + 1))
}

log_fail() {
  echo -e "${RED}FAIL:${NC} $*"
  TESTS_FAILED=$((TESTS_FAILED + 1))
}

log_info() {
  echo -e "${YELLOW}INFO:${NC} $*"
}

#
# Helper functions
#

# Run psql against a given database with tuples-only, unaligned, quiet output.
# Arguments: $1 - database name; remaining args passed through to psql.
run_psql() {
  local dbname="$1"
  shift
  psql -U "$PGUSER" -d "$dbname" -tAq "$@"
}

# Execute a single SQL statement, swallowing errors (best-effort helper).
# Arguments: $1 - database name; $2 - SQL text.
query_db() {
  local dbname="$1"
  local query="$2"
  run_psql "$dbname" -c "$query" 2>/dev/null || true
}

# Create a fresh test database (dropping any stale one first) and register
# it for cleanup.
# Arguments: $1 - database name.
create_test_db() {
  local dbname="$1"
  log_info "Creating test database: $dbname"
  # Drop if exists
  psql -U "$PGUSER" -d postgres -c "DROP DATABASE IF EXISTS $dbname;" &>/dev/null || true
  # Create database
  psql -U "$PGUSER" -d postgres -c "CREATE DATABASE $dbname;" &>/dev/null
  DATABASES_TO_CLEANUP+=("$dbname")
}

# shellcheck disable=SC2317  # Function called from cleanup trap handler
# Drop a test database, force-terminating any lingering connections first.
# Arguments: $1 - database name.
drop_test_db() {
  local dbname="$1"
  log_info "Dropping test database: $dbname"
  # Terminate connections
  psql -U "$PGUSER" -d postgres -c "
    SELECT pg_terminate_backend(pid)
    FROM pg_stat_activity
    WHERE datname = '$dbname' AND pid <> pg_backend_pid();
  " &>/dev/null || true
  # Drop database
  psql -U "$PGUSER" -d postgres -c "DROP DATABASE IF EXISTS $dbname;" &>/dev/null || true
}

# Create a simple table with a serial primary key (required for logical
# replication of UPDATE/DELETE).
# Arguments: $1 - database name; $2 - table name.
create_table_with_pk() {
  local dbname="$1"
  local table="$2"
  query_db "$dbname" "
    CREATE TABLE $table (
      id SERIAL PRIMARY KEY,
      name TEXT,
      created_at TIMESTAMP DEFAULT now()
    );
  "
}

# Initialize a backup directory (creates replication slot and initial backups)
# Arguments: $1 - database name; $2 - backup directory; $3 - slot name.
init_backup_system() {
  local dbname="$1"
  local backup_dir="$2"
  local slot="$3"
  mkdir -p "$backup_dir"
  "$PG_SCRIBE" --init -d "$dbname" -f "$backup_dir" -S "$slot" -U "$PGUSER" &>/dev/null
}

#
# Test cases
#

# --new-chain must reject invocations missing -d or -f with exit code 5.
test_new_chain_requires_args() {
  TESTS_RUN=$((TESTS_RUN + 1))
  log_test "New chain requires database and directory"

  local exit_code=0
  # Missing database
  "$PG_SCRIBE" --new-chain -f /tmp/test &>/dev/null || exit_code=$?
  if [[ $exit_code -ne 5 ]]; then
    log_fail "Should fail with exit code 5 when missing database"
    return 1
  fi

  # Missing directory
  exit_code=0
  "$PG_SCRIBE" --new-chain -d testdb &>/dev/null || exit_code=$?
  if [[ $exit_code -ne 5 ]]; then
    log_fail "Should fail with exit code 5 when missing directory"
    return 1
  fi

  log_pass "Argument validation works"
  return 0
}

# --new-chain must reject a backup directory that does not exist (exit 4).
test_new_chain_directory_must_exist() {
  TESTS_RUN=$((TESTS_RUN + 1))
  log_test "New chain requires existing directory"

  local dbname="${TEST_DB_PREFIX}_dircheck"
  local backup_dir="$TEST_DIR/nonexistent_dir"

  create_test_db "$dbname"
  create_table_with_pk "$dbname" "users"

  # Try to create chain in non-existent directory
  local exit_code=0
  "$PG_SCRIBE" --new-chain -d "$dbname" -f "$backup_dir" -U "$PGUSER" &>/dev/null || exit_code=$?

  if [[ $exit_code -eq 4 ]]; then
    log_pass "Correctly rejects non-existent directory"
    return 0
  else
    log_fail "Expected exit code 4, got $exit_code"
    return 1
  fi
}

# Happy path: after --init, --new-chain creates a second chain directory
# containing base.sql, globals.sql and metadata.json with correct content.
test_new_chain_basic_success() {
  TESTS_RUN=$((TESTS_RUN + 1))
  log_test "Basic new chain success (no compression)"

  local dbname="${TEST_DB_PREFIX}_basic"
  local backup_dir="$TEST_DIR/basic"
  local slot="test_slot_basic"

  # Setup - initialize backup system first
  create_test_db "$dbname"
  create_table_with_pk "$dbname" "users"
  query_db "$dbname" "INSERT INTO users (name) VALUES ('Alice'), ('Bob'), ('Charlie');"
  init_backup_system "$dbname" "$backup_dir" "$slot"

  # Sleep to ensure different timestamp
  sleep 2

  # Create a new chain without compression
  if "$PG_SCRIBE" --new-chain -d "$dbname" -f "$backup_dir" -Z none -U "$PGUSER" &>/dev/null; then
    # Count chain directories (should have 2: 1 from init, 1 from new-chain)
    local chain_count
    chain_count=$(find "$backup_dir" -maxdepth 1 -type d -name 'chain-*' 2>/dev/null | wc -l)
    if [[ $chain_count -ne 2 ]]; then
      log_fail "Expected 2 chain directories, got $chain_count"
      return 1
    fi

    # Get latest chain directory
    local latest_chain
    latest_chain=$(find "$backup_dir" -maxdepth 1 -type d -name 'chain-*' 2>/dev/null | sort | tail -1)

    # Verify chain structure
    if [[ ! -f "$latest_chain/base.sql" ]]; then
      log_fail "base.sql not found in latest chain"
      return 1
    fi
    if [[ ! -f "$latest_chain/globals.sql" ]]; then
      log_fail "globals.sql not found in latest chain"
      return 1
    fi
    if [[ ! -f "$latest_chain/metadata.json" ]]; then
      log_fail "metadata.json not found in latest chain"
      return 1
    fi

    # Verify backup content
    if ! grep -q "CREATE TABLE public.users" "$latest_chain/base.sql"; then
      log_fail "Base backup missing table definition"
      return 1
    fi
    if ! grep -q "Alice" "$latest_chain/base.sql"; then
      log_fail "Base backup missing data"
      return 1
    fi

    # Verify metadata content (JSON format)
    if ! grep -q "\"database\": \"$dbname\"" "$latest_chain/metadata.json"; then
      log_fail "Metadata missing database name"
      return 1
    fi

    log_pass "Basic new chain successful"
    return 0
  else
    log_fail "New chain command failed"
    return 1
  fi
}

# -Z gzip must produce a valid base.sql.gz (globals stay uncompressed).
test_new_chain_with_gzip_compression() {
  TESTS_RUN=$((TESTS_RUN + 1))
  log_test "New chain with gzip compression"

  local dbname="${TEST_DB_PREFIX}_gzip"
  local backup_dir="$TEST_DIR/gzip"
  local slot="test_slot_gzip"

  # Setup
  create_test_db "$dbname"
  create_table_with_pk "$dbname" "data_table"
  # Add enough data to see compression benefit
  query_db "$dbname" "INSERT INTO data_table (name) SELECT 'Row ' || generate_series(1, 1000);"
  init_backup_system "$dbname" "$backup_dir" "$slot"

  # Sleep to ensure different timestamp
  sleep 2

  # Create new chain with gzip compression
  if "$PG_SCRIBE" --new-chain -d "$dbname" -f "$backup_dir" -Z gzip -U "$PGUSER" &>/dev/null; then
    # Get latest chain directory
    local latest_chain
    latest_chain=$(find "$backup_dir" -maxdepth 1 -type d -name 'chain-*' 2>/dev/null | sort | tail -1)

    # Check for compressed base.sql in chain directory
    if [[ ! -f "$latest_chain/base.sql.gz" ]]; then
      log_fail "No gzip-compressed base backup found in chain"
      return 1
    fi

    # Globals are not compressed (too small to benefit)
    if [[ ! -f "$latest_chain/globals.sql" ]]; then
      log_fail "No globals backup found in chain"
      return 1
    fi

    # Verify we can decompress and read the backup
    if ! gunzip -t "$latest_chain/base.sql.gz" &>/dev/null; then
      log_fail "Compressed backup file is invalid"
      return 1
    fi

    # Verify content
    if ! gunzip -c "$latest_chain/base.sql.gz" | grep -q "CREATE TABLE public.data_table"; then
      log_fail "Compressed backup missing table definition"
      return 1
    fi

    log_pass "Gzip compression successful"
    return 0
  else
    log_fail "New chain with gzip failed"
    return 1
  fi
}

# Default (no -Z) must produce an uncompressed base.sql.
test_new_chain_no_compression() {
  TESTS_RUN=$((TESTS_RUN + 1))
  log_test "New chain with no compression (default)"

  local dbname="${TEST_DB_PREFIX}_nocomp"
  local backup_dir="$TEST_DIR/nocomp"
  local slot="test_slot_nocomp"

  # Setup
  create_test_db "$dbname"
  create_table_with_pk "$dbname" "data_table"
  query_db "$dbname" "INSERT INTO data_table (name) SELECT 'Row ' || generate_series(1, 500);"
  init_backup_system "$dbname" "$backup_dir" "$slot"

  # Sleep to ensure different timestamp
  sleep 2

  # Create new chain with default compression (none)
  if "$PG_SCRIBE" --new-chain -d "$dbname" -f "$backup_dir" -U "$PGUSER" &>/dev/null; then
    # Get latest chain directory
    local latest_chain
    latest_chain=$(find "$backup_dir" -maxdepth 1 -type d -name 'chain-*' 2>/dev/null | sort | tail -1)

    # Check for uncompressed base.sql in chain directory
    if [[ ! -f "$latest_chain/base.sql" ]]; then
      log_fail "No uncompressed base backup found in chain"
      return 1
    fi

    # Globals are not compressed (too small to benefit)
    if [[ ! -f "$latest_chain/globals.sql" ]]; then
      log_fail "No globals backup found in chain"
      return 1
    fi

    # Verify content
    if ! grep -q "CREATE TABLE public.data_table" "$latest_chain/base.sql"; then
      log_fail "Base backup missing table definition"
      return 1
    fi

    log_pass "No compression (default) successful"
    return 0
  else
    log_fail "New chain with default compression failed"
    return 1
  fi
}

# Repeated --new-chain invocations must accumulate chain directories, and
# the newest chain's base backup must contain all data written so far.
test_new_chain_multiple_times() {
  TESTS_RUN=$((TESTS_RUN + 1))
  log_test "Multiple new chains (retention simulation)"

  local dbname="${TEST_DB_PREFIX}_multi"
  local backup_dir="$TEST_DIR/multi"
  local slot="test_slot_multi"

  # Setup
  create_test_db "$dbname"
  create_table_with_pk "$dbname" "counter"
  init_backup_system "$dbname" "$backup_dir" "$slot"

  # Create multiple new chains with data changes
  for i in 1 2 3; do
    query_db "$dbname" "INSERT INTO counter (name) VALUES ('Iteration $i');"
    sleep 1 # Ensure different timestamps
    if ! "$PG_SCRIBE" --new-chain -d "$dbname" -f "$backup_dir" -Z none -U "$PGUSER" &>/dev/null; then
      log_fail "New chain $i failed"
      return 1
    fi
  done

  # Count total chains (1 from init + 3 from new-chain = 4)
  local chain_count
  chain_count=$(find "$backup_dir" -maxdepth 1 -type d -name 'chain-*' 2>/dev/null | wc -l)
  if [[ $chain_count -ne 4 ]]; then
    log_fail "Expected 4 chain directories, got $chain_count"
    return 1
  fi

  # Verify latest chain has all data
  local latest_chain
  latest_chain=$(find "$backup_dir" -maxdepth 1 -type d -name 'chain-*' 2>/dev/null | sort | tail -1)
  for i in 1 2 3; do
    if ! grep -q "Iteration $i" "$latest_chain/base.sql"; then
      log_fail "Latest chain missing data from iteration $i"
      return 1
    fi
  done

  log_pass "Multiple new chains successful"
  return 0
}

# A chain produced by --new-chain must be restorable via --restore, with
# all rows intact in the restored database.
test_new_chain_restorability() {
  TESTS_RUN=$((TESTS_RUN + 1))
  log_test "New chain is restorable"

  local dbname="${TEST_DB_PREFIX}_restore"
  local backup_dir="$TEST_DIR/restore"
  local slot="test_slot_restore"
  local restore_dbname="${TEST_DB_PREFIX}_restored"

  # Setup
  create_test_db "$dbname"
  create_table_with_pk "$dbname" "products"
  query_db "$dbname" "INSERT INTO products (name) VALUES ('Widget'), ('Gadget'), ('Doohickey');"
  init_backup_system "$dbname" "$backup_dir" "$slot"

  # Create a new chain
  if ! "$PG_SCRIBE" --new-chain -d "$dbname" -f "$backup_dir" -Z none -U "$PGUSER" &>/dev/null; then
    log_fail "New chain failed"
    return 1
  fi

  # Use pg_scribe --restore to restore the chain
  if ! "$PG_SCRIBE" --restore -d "$restore_dbname" -f "$backup_dir" -C -U "$PGUSER" &>/dev/null; then
    log_fail "Restore failed"
    return 1
  fi
  DATABASES_TO_CLEANUP+=("$restore_dbname")

  # Verify restored data
  local count
  count=$(query_db "$restore_dbname" "SELECT COUNT(*) FROM products;")
  if [[ "$count" -ne 3 ]]; then
    log_fail "Expected 3 rows, got $count"
    return 1
  fi

  local widget_exists
  widget_exists=$(query_db "$restore_dbname" "SELECT COUNT(*) FROM products WHERE name = 'Widget';")
  if [[ "$widget_exists" -ne 1 ]]; then
    log_fail "Expected to find Widget in restored data"
    return 1
  fi

  log_pass "New chain is restorable"
  return 0
}

# The base backup must preserve non-trivial schema: unique constraints,
# foreign keys and secondary indexes.
test_new_chain_with_complex_schema() {
  TESTS_RUN=$((TESTS_RUN + 1))
  log_test "New chain with complex schema (indexes, constraints)"

  local dbname="${TEST_DB_PREFIX}_complex"
  local backup_dir="$TEST_DIR/complex"
  local slot="test_slot_complex"

  # Setup with complex schema
  create_test_db "$dbname"
  query_db "$dbname" "
    CREATE TABLE authors (
      id SERIAL PRIMARY KEY,
      name TEXT NOT NULL UNIQUE,
      email TEXT
    );
    CREATE TABLE books (
      id SERIAL PRIMARY KEY,
      title TEXT NOT NULL,
      author_id INTEGER REFERENCES authors(id) ON DELETE CASCADE,
      published_date DATE,
      isbn TEXT UNIQUE
    );
    CREATE INDEX idx_books_title ON books(title);
    CREATE INDEX idx_books_published ON books(published_date);
  "
  query_db "$dbname" "
    INSERT INTO authors (name, email) VALUES ('John Doe', 'john@example.com');
    INSERT INTO books (title, author_id, published_date, isbn)
    VALUES ('Test Book', 1, '2024-01-01', '1234567890');
  "
  init_backup_system "$dbname" "$backup_dir" "$slot"

  # Sleep to ensure different timestamp
  sleep 2

  # Create new chain
  if ! "$PG_SCRIBE" --new-chain -d "$dbname" -f "$backup_dir" -Z none -U "$PGUSER" &>/dev/null; then
    log_fail "New chain failed"
    return 1
  fi

  # Get latest chain and verify backup contains schema elements
  local latest_chain
  latest_chain=$(find "$backup_dir" -maxdepth 1 -type d -name 'chain-*' 2>/dev/null | sort | tail -1)

  if ! grep -q "CREATE TABLE public.authors" "$latest_chain/base.sql"; then
    log_fail "Backup missing authors table"
    return 1
  fi
  if ! grep -q "CREATE TABLE public.books" "$latest_chain/base.sql"; then
    log_fail "Backup missing books table"
    return 1
  fi
  if ! grep -q "UNIQUE" "$latest_chain/base.sql"; then
    log_fail "Backup missing unique constraints"
    return 1
  fi
  if ! grep -q "REFERENCES" "$latest_chain/base.sql"; then
    log_fail "Backup missing foreign key"
    return 1
  fi
  if ! grep -q "CREATE INDEX" "$latest_chain/base.sql"; then
    log_fail "Backup missing indexes"
    return 1
  fi

  log_pass "Complex schema backed up successfully"
  return 0
}

# metadata.json in a new chain must record the database name and a
# creation timestamp.
test_new_chain_metadata_tracking() {
  TESTS_RUN=$((TESTS_RUN + 1))
  log_test "Metadata file tracks chain information"

  local dbname="${TEST_DB_PREFIX}_metadata"
  local backup_dir="$TEST_DIR/metadata"
  local slot="test_slot_metadata"

  # Setup
  create_test_db "$dbname"
  create_table_with_pk "$dbname" "test_table"
  init_backup_system "$dbname" "$backup_dir" "$slot"

  # Sleep to ensure different timestamp
  sleep 2

  # Create a new chain
  if ! "$PG_SCRIBE" --new-chain -d "$dbname" -f "$backup_dir" -Z none -U "$PGUSER" &>/dev/null; then
    log_fail "New chain failed"
    return 1
  fi

  # Get latest chain and verify metadata content
  local latest_chain
  latest_chain=$(find "$backup_dir" -maxdepth 1 -type d -name 'chain-*' 2>/dev/null | sort | tail -1)
  local metadata_file="$latest_chain/metadata.json"

  if [[ ! -f "$metadata_file" ]]; then
    log_fail "Metadata file not found in chain"
    return 1
  fi
  if ! grep -q "\"database\": \"$dbname\"" "$metadata_file"; then
    log_fail "Metadata missing database name"
    return 1
  fi
  if ! grep -q "\"created\":" "$metadata_file"; then
    log_fail "Metadata missing created timestamp"
    return 1
  fi

  log_pass "Metadata tracking works correctly"
  return 0
}

# --new-chain --start must still validate required arguments; a plain
# --new-chain afterwards must still succeed.
test_new_chain_with_start_flag() {
  TESTS_RUN=$((TESTS_RUN + 1))
  log_test "New chain with --start flag validates arguments"

  local dbname="${TEST_DB_PREFIX}_start_flag"
  local backup_dir="$TEST_DIR/start_flag"
  local slot="test_slot_start_flag"

  # Setup
  create_test_db "$dbname"
  create_table_with_pk "$dbname" "test_table"
  init_backup_system "$dbname" "$backup_dir" "$slot"

  # Test 1: --new-chain --start should require database name
  local exit_code=0
  "$PG_SCRIBE" --new-chain --start -f "$backup_dir" -U "$PGUSER" &>/dev/null || exit_code=$?
  if [[ $exit_code -ne 5 ]]; then
    log_fail "Should require database name with --start, got exit code $exit_code"
    return 1
  fi

  # Test 2: Verify --start flag is accepted with proper arguments
  # Note: We can't fully test the background behavior in a simple test,
  # but we can verify the command parses correctly and would start
  # (We won't actually let it start streaming to avoid complexity)

  # Sleep to ensure different timestamp for chain
  sleep 2

  # Create new chain WITHOUT --start flag (to verify it works)
  if ! "$PG_SCRIBE" --new-chain -d "$dbname" -f "$backup_dir" -Z none -U "$PGUSER" &>/dev/null; then
    log_fail "New chain creation failed"
    return 1
  fi

  # Verify 2 chains exist now
  local chain_count
  chain_count=$(find "$backup_dir" -maxdepth 1 -type d -name 'chain-*' 2>/dev/null | wc -l)
  if [[ $chain_count -ne 2 ]]; then
    log_fail "Expected 2 chains, got $chain_count"
    return 1
  fi

  log_pass "--start flag argument parsing works correctly"
  return 0
}

#
# Cleanup
#

# shellcheck disable=SC2317  # Function called via trap handler
cleanup() {
  log_info "Cleaning up test resources..."

  # Guard the array expansions: under 'set -u', expanding an empty array
  # is a fatal "unbound variable" error in bash < 4.4, and this trap can
  # fire (INT/TERM) before any database has been registered.
  if [[ ${#DATABASES_TO_CLEANUP[@]} -gt 0 ]]; then
    # Drop replication slots (before dropping databases)
    for dbname in "${DATABASES_TO_CLEANUP[@]}"; do
      for slot in test_slot_basic test_slot_gzip test_slot_nocomp \
        test_slot_multi test_slot_restore test_slot_complex \
        test_slot_metadata test_slot_start_flag; do
        # Try to drop slot
        psql -U "$PGUSER" -d "$dbname" -c "
          SELECT pg_drop_replication_slot('$slot')
          FROM pg_replication_slots
          WHERE slot_name = '$slot';
        " &>/dev/null || true
      done
    done

    # Drop databases
    for dbname in "${DATABASES_TO_CLEANUP[@]}"; do
      drop_test_db "$dbname"
    done
  fi

  # Remove test directory
  if [[ -d "$TEST_DIR" ]]; then
    rm -rf "$TEST_DIR"
  fi

  log_info "Cleanup complete"
}

#
# Main test runner
#

main() {
  echo "========================================"
  echo "pg_scribe --new-chain Test Suite"
  echo "========================================"
  echo ""

  # Verify pg_scribe exists
  if [[ ! -x "$PG_SCRIBE" ]]; then
    echo "ERROR: pg_scribe not found or not executable: $PG_SCRIBE"
    exit 1
  fi

  # Verify PostgreSQL is running
  if ! psql -U "$PGUSER" -d postgres -c "SELECT 1;" &>/dev/null; then
    echo "ERROR: Cannot connect to PostgreSQL"
    exit 1
  fi

  # Verify wal_level is logical
  local wal_level
  wal_level=$(psql -U "$PGUSER" -d postgres -tAq -c "SHOW wal_level;")
  if [[ "$wal_level" != "logical" ]]; then
    echo "ERROR: wal_level must be 'logical', currently: $wal_level"
    echo "Update ~/.pgenv/pgsql/data/postgresql.conf and restart PostgreSQL"
    exit 1
  fi

  # Create test directory
  mkdir -p "$TEST_DIR"

  # Set up cleanup trap
  trap cleanup EXIT INT TERM

  echo "Running tests..."
  echo ""

  # Run all tests (use || true to prevent set -e from exiting)
  test_new_chain_requires_args || true
  test_new_chain_directory_must_exist || true
  test_new_chain_basic_success || true
  test_new_chain_with_gzip_compression || true
  test_new_chain_no_compression || true
  test_new_chain_multiple_times || true
  test_new_chain_restorability || true
  test_new_chain_with_complex_schema || true
  test_new_chain_metadata_tracking || true
  test_new_chain_with_start_flag || true

  # Summary
  echo ""
  echo "========================================"
  echo "Test Results"
  echo "========================================"
  echo "Tests run:    $TESTS_RUN"
  echo -e "Tests passed: ${GREEN}$TESTS_PASSED${NC}"
  echo -e "Tests failed: ${RED}$TESTS_FAILED${NC}"
  echo ""

  if [[ $TESTS_FAILED -eq 0 ]]; then
    echo -e "${GREEN}All tests passed!${NC}"
    exit 0
  else
    echo -e "${RED}Some tests failed!${NC}"
    exit 1
  fi
}

main "$@"