Mirror of https://github.com/kartoza/docker-postgis
Fix scenario tests
- Fix cache keys for GH Action
- Disable logical_replication test health-check. By disabling the health-check, we can immediately see the logs.

pull/289/head
parent 3e5c371720
commit 0df962e1f8
@@ -47,9 +47,12 @@ jobs:
           # - Dockerfile recipe
           # - docker-compose.build.yml
           # - build args (.example.env)
-          key: buildx-${{ hashFiles('Dockerfile', 'docker-compose.build.yml', '.example.env') }}-${{ github.sha }}
+          # - base_build directory
+          # - scripts directory
+          key: buildx-${{ hashFiles('Dockerfile', 'docker-compose.build.yml', '.example.env', 'base_build', 'scripts') }}-${{ github.sha }}
           restore-keys: |
-            buildx-${{ hashFiles('Dockerfile', 'docker-compose.build.yml', '.example.env') }}-
+            buildx-${{ hashFiles('Dockerfile', 'docker-compose.build.yml', '.example.env', 'base_build', 'scripts') }}-

       - name: Login to DockerHub
         uses: docker/login-action@v1
         with:
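The cache key now hashes every build input, including the base_build and scripts directories, so a change to the build scripts invalidates the buildx cache instead of restoring stale layers. As a rough, hedged illustration only (GitHub's hashFiles uses its own hashing; this is not the same algorithm), a comparable content digest over the same inputs could be computed locally on a GNU system to reason about what would invalidate the cache:

# Rough local approximation of the cache key's content hash (illustrative only).
# Any change under these files/directories yields a different digest, which is
# why the workflow now rebuilds the buildx cache after script changes.
find Dockerfile docker-compose.build.yml .example.env base_build scripts -type f -print0 \
  | sort -z \
  | xargs -0 sha256sum \
  | sha256sum \
  | cut -d' ' -f1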
@@ -139,7 +142,6 @@ jobs:
           - collations
           - extensions
           - logical_replication
-          - logical_replications
         include:
           - distro: debian
             imageVersion: bullseye
@@ -158,9 +160,11 @@ jobs:
           # - Dockerfile recipe
           # - docker-compose.build.yml
           # - build args (.example.env)
-          key: buildx-${{ hashFiles('Dockerfile', 'docker-compose.build.yml', '.example.env') }}-${{ github.sha }}
+          # - base_build directory
+          # - scripts directory
+          key: buildx-${{ hashFiles('Dockerfile', 'docker-compose.build.yml', '.example.env', 'base_build', 'scripts') }}-${{ github.sha }}
           restore-keys: |
-            buildx-${{ hashFiles('Dockerfile', 'docker-compose.build.yml', '.example.env') }}-
+            buildx-${{ hashFiles('Dockerfile', 'docker-compose.build.yml', '.example.env', 'base_build', 'scripts') }}-

       - name: Build image for testing
         id: docker_build_testing_image
@@ -48,9 +48,16 @@ jobs:
         uses: actions/cache@v2
         with:
           path: /tmp/.buildx-cache
-          key: buildx-${{ matrix.distro }}-${{ matrix.imageVersion }}-${{ matrix.imageVariant }}-${{ github.sha }}
+          # Build inputs are:
+          # - Dockerfile recipe
+          # - docker-compose.build.yml
+          # - build args (.example.env)
+          # - base_build directory
+          # - scripts directory
+          key: buildx-${{ hashFiles('Dockerfile', 'docker-compose.build.yml', '.example.env', 'base_build', 'scripts') }}-${{ github.sha }}
           restore-keys: |
-            buildx-${{ matrix.distro }}-${{ matrix.imageVersion }}-${{ matrix.imageVariant }}-
+            buildx-${{ hashFiles('Dockerfile', 'docker-compose.build.yml', '.example.env', 'base_build', 'scripts') }}-

       - name: Login to DockerHub
         uses: docker/login-action@v1
         with:
@@ -5,6 +5,7 @@
 .DS_Store
 .python-version
 venv
+.venv
 __pycache__

 .env
@@ -47,8 +47,7 @@ services:
       REPLICATION_PASS: 'replicator'
       REPLICATION: 'true'
     depends_on:
-      pg-publisher:
-        condition: service_healthy
+      - pg-publisher
     # You can expose the port to observe it in your local machine
     # For this sample, it was disabled by default to allow scaling test
     ports:
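With the plain depends_on form, Compose only orders container start-up; it no longer waits for the publisher's health-check, which is what lets the publisher logs surface immediately instead of the scenario stalling on an unhealthy dependency. Readiness is instead polled explicitly, along the lines of what the scenario test script later in this diff does (service names taken from this compose file):

# Start the scenario and wait for both clusters to accept connections.
docker-compose up -d
until docker-compose exec -T pg-publisher pg_isready; do sleep 1; done
until docker-compose exec -T pg-subscriber pg_isready; do sleep 1; done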
@@ -8,22 +8,26 @@ source ../test-env.sh
 # Run service
 docker-compose up -d

-sleep 5
+if [[ -n "${PRINT_TEST_LOGS}" ]]; then
+  docker-compose logs -f &
+fi
+
+sleep 60

 # Preparing publisher cluster
-until docker-compose exec pg-publisher pg_isready; do
+until docker-compose exec -T pg-publisher pg_isready; do
   sleep 1
 done;

 # Execute tests
-docker-compose exec pg-publisher /bin/bash /tests/test_publisher.sh
+docker-compose exec -T pg-publisher /bin/bash /tests/test_publisher.sh

 # Preparing node cluster
-until docker-compose exec pg-subscriber pg_isready; do
+until docker-compose exec -T pg-subscriber pg_isready; do
   sleep 1
 done;

 # Execute tests
-docker-compose exec pg-subscriber /bin/bash /tests/test_subscriber.sh
+docker-compose exec -T pg-subscriber /bin/bash /tests/test_subscriber.sh

 docker-compose down -v
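docker-compose exec allocates a pseudo-TTY by default, which fails in a non-interactive CI shell; the -T flag disables that allocation so the readiness loops and test commands run cleanly under GitHub Actions. The new PRINT_TEST_LOGS switch streams container logs in the background for easier debugging. A hypothetical local invocation (the script path is not shown in this view, so ./test.sh here is a placeholder):

# Stream container logs while the scenario test runs.
PRINT_TEST_LOGS=1 ./test.sh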
@@ -72,7 +72,7 @@ class TestReplicationSubscriber(unittest.TestCase):
         try:
             output = func_action()
             func_assert(output)
-            print('Assertion succes')
+            print('Assertion success')
             return
         except Exception as e:
             last_error = e
@@ -1,104 +0,0 @@
-version: '2.1'
-
-volumes:
-  pg-master-data-dir:
-  pg-node-data-dir:
-
-services:
-  pg-publisher:
-    image: 'kartoza/postgis:${TAG:-manual-build}'
-    restart: 'always'
-    # You can optionally mount to volume, to play with the persistence and
-    # observe how the node will behave after restarts.
-    volumes:
-      - pg-master-data-dir:/var/lib/postgresql
-      - ./scripts/setup-publisher.sql:/docker-entrypoint-initdb.d/setup-publisher.sql
-      - ./tests:/tests
-      - ../utils:/lib/utils
-    environment:
-      # ALLOW_IP_RANGE option is used to specify additionals allowed domains
-      # in pg_hba.
-      # This range should allow nodes to connect to master
-      ALLOW_IP_RANGE: '0.0.0.0/0'
-
-      # We can specify optional credentials
-      REPLICATION_USER: 'replicator'
-      REPLICATION_PASS: 'replicator'
-      WAL_LEVEL: 'logical'
-      # Setup master replication variables
-      #PG_MAX_WAL_SENDERS: 8
-      #PG_WAL_KEEP_SEGMENTS: 100
-    # You can expose the port to observe it in your local machine
-    ports:
-      - "7777:5432"
-    healthcheck:
-      interval: 60s
-      timeout: 30s
-      retries: 3
-      test: "pg_isready"
-
-  pg-subscriber:
-    image: 'kartoza/postgis:${TAG:-manual-build}'
-    restart: 'always'
-    # You can optionally mount to volume, but we're not able to scale it
-    # in that case.
-    # The node will always destroy its database and copy from master at
-    # runtime
-    volumes:
-      - pg-node-data-dir:/var/lib/postgresql
-      - ./scripts/setup-subscriber.sql:/docker-entrypoint-initdb.d/setup-subscriber.sql
-      - ./tests:/tests
-      - ../utils:/lib/utils
-
-    environment:
-      # ALLOW_IP_RANGE option is used to specify additionals allowed domains
-      # in pg_hba.
-      # Not really needed in nodes for the replication, but optionally can
-      # be put when nodes are needed to be a failover server when master
-      # is down. The IP Range are generally needed if other services wants to
-      # connect to this node
-      ALLOW_IP_RANGE: '0.0.0.0/0'
-
-      # REPLICATE_FROM options accepts domain-name or IP address
-      # with this in mind, you can also put docker service name, because it
-      # will be resolved as host name.
-      #REPLICATE_FROM: 'pg-publisher'
-
-      # REPLICATE_PORT will default to 5432 if not specified.
-      # REPLICATE_PORT: '5432'
-      # In the case where you need to replicate from outside service,
-      # you can put the server address and port here, as long as the target
-      # where configured as master, and replicable.
-      # REPLICATE_FROM: '192.168.1.8'
-      # REPLICATE_PORT: '7777'
-
-      # DESTROY_DATABASE_ON_RESTART will default to True if not specified.
-      # If specified other than True, it will prevent node from destroying
-      # database on restart
-      DESTROY_DATABASE_ON_RESTART: 'True'
-
-      # PROMOTE_MASTER Default empty.
-      # If specified with any value, then it will convert current node into
-      # a writable state. Useful if master is down and the current node needs
-      # to be promoted until manual recovery.
-      # PROMOTE_MASTER: 'True'
-
-      # For now we don't support different credentials for replication
-      # so we use the same credentials as master's superuser, or anything that
-      # have replication role.
-      REPLICATION_USER: 'replicator'
-      REPLICATION_PASS: 'replicator'
-      WAL_LEVEL: 'logical'
-    depends_on:
-      pg-publisher:
-        condition: service_healthy
-    # You can expose the port to observe it in your local machine
-    # For this sample, it was disabled by default to allow scaling test
-    ports:
-      - "7776:5432"
-    # healthcheck:
-    #   interval: 60s
-    #   timeout: 30s
-    #   retries: 3
-    #   test: "pg_isready"
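The deleted scenario files above pin the image to 'kartoza/postgis:${TAG:-manual-build}'. As a hedged usage sketch, assuming the remaining scenario compose files keep the same TAG convention, a locally built image can be exercised against a scenario like this:

# Run the scenario against a locally built image; TAG falls back to
# 'manual-build' when unset.
TAG=manual-build docker-compose up -d
docker-compose ps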
@@ -1,22 +0,0 @@
--- Create a table
-CREATE TABLE sweets
-(
-    id SERIAL,
-    name TEXT,
-    price DECIMAL,
-    CONSTRAINT sweets_pkey PRIMARY KEY (id)
-);
-
-CREATE TABLE public.block (
-    id serial NOT NULL,
-    geom public.geometry(Polygon,4326),
-    fid bigint,
-    tile_name character varying,
-    location character varying
-);
-
--- Add table to publication called logical_replication which is created by the scripts
-ALTER PUBLICATION logical_replication ADD TABLE sweets;
-ALTER PUBLICATION logical_replication ADD TABLE block;
--- Inserts records into the table
-INSERT INTO sweets (name, price) VALUES ('strawberry', 4.50), ('Coffee', 6.20), ('lollipop', 3.80);
@@ -1,20 +0,0 @@
--- Create a table
-CREATE TABLE sweets
-(
-    id SERIAL,
-    name TEXT,
-    price DECIMAL,
-    CONSTRAINT sweets_pkey PRIMARY KEY (id)
-);
-
-CREATE TABLE public.block (
-    id serial NOT NULL,
-    geom public.geometry(Polygon,4326),
-    fid bigint,
-    tile_name character varying,
-    location character varying
-);
--- Create a publication
-CREATE SUBSCRIPTION logical_subscription
-    CONNECTION 'host=pg-publisher port=5432 password=docker user=docker dbname=gis'
-    PUBLICATION logical_replication;
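These deleted init scripts wire up the two sides of logical replication: ALTER PUBLICATION ... ADD TABLE on the publisher and CREATE SUBSCRIPTION ... PUBLICATION logical_replication on the subscriber. As a hedged sketch for checking the same wiring by hand, assuming the compose service names and the docker/gis credentials from the connection string above, the replication objects can be listed inside the running containers:

# List publications on the publisher and subscriptions on the subscriber
# (\dRp+ and \dRs+ are psql meta-commands for replication objects).
docker-compose exec -T pg-publisher psql -U docker -d gis -c '\dRp+'
docker-compose exec -T pg-subscriber psql -U docker -d gis -c '\dRs+'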
@@ -1,33 +0,0 @@
-#!/usr/bin/env bash
-
-# exit immediately if test fails
-set -e
-
-source ../test-env.sh
-
-# Run service
-docker-compose up -d
-
-if [[ -n "${PRINT_TEST_LOGS}" ]]; then
-  docker-compose logs -f &
-fi
-
-sleep 30
-
-# Preparing master cluster
-until docker-compose exec -T pg-publisher pg_isready; do
-  sleep 1
-done;
-
-# Execute tests
-docker-compose exec -T pg-publisher /bin/bash /tests/test_master.sh
-
-# Preparing node cluster
-until docker-compose exec -T pg-subscriber pg_isready; do
-  sleep 1
-done;
-
-# Execute tests
-docker-compose exec -T pg-node /bin/bash /tests/test_node.sh
-
-docker-compose down -v
@@ -1,13 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-source /scripts/env-data.sh
-
-# execute tests
-pushd /tests
-
-PGHOST=localhost \
-PGDATABASE=gis \
-PYTHONPATH=/lib \
-python3 -m unittest -v test_replication.TestReplicationMaster
@@ -1,13 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-source /scripts/env-data.sh
-
-# execute tests
-pushd /tests
-
-PGHOST=localhost \
-PGDATABASE=gis \
-PYTHONPATH=/lib \
-python3 -m unittest -v test_replication.TestReplicationNode
@@ -1,75 +0,0 @@
-import unittest
-from utils.utils import DBConnection
-
-
-class TestReplicationMaster(unittest.TestCase):
-
-    def setUp(self):
-        self.db = DBConnection()
-
-    def test_create_new_data(self):
-        # create new table
-        self.db.conn.autocommit = True
-        with self.db.cursor() as c:
-            c.execute(
-                """
-                CREATE TABLE IF NOT EXISTS test_replication_table (
-                    id integer not null
-                        constraint pkey primary key,
-                    geom geometry(Point, 4326),
-                    name varchar(30),
-                    alias varchar(30),
-                    description varchar(255)
-                );
-                """
-            )
-
-            c.execute(
-                """
-                ALTER PUBLICATION logical_replication
-                ADD TABLE test_replication_table;
-                """
-            )
-
-            c.execute(
-                """
-                INSERT INTO test_replication_table (id, geom, name, alias, description)
-                VALUES
-                (
-                    1,
-                    st_setsrid(st_point(107.6097, 6.9120), 4326),
-                    'Bandung',
-                    'Paris van Java',
-                    'Asia-Africa conference was held here'
-                ) ON CONFLICT DO NOTHING;
-                """
-            )
-
-
-class TestReplicationNode(unittest.TestCase):
-
-    def setUp(self):
-        self.db = DBConnection()
-
-    def test_read_data(self):
-        # create new table
-        self.db.conn.autocommit = True
-        with self.db.cursor() as c:
-            c.execute(
-                """
-                CREATE SUBSCRIPTION logical_subscription
-                CONNECTION 'host=${PG_PUBLISHER} port=5432
-                password=${PG_PASSWORD} user=${PG_USER} dbname=gis'
-                PUBLICATION logical_replication;
-                """
-            )
-
-            c.execute(
-                """
-                SELECT * FROM test_replication_table;
-                """
-            )
-
-            rows = c.fetchall()
-            self.assertEqual(len(rows), 1)
@@ -45,7 +45,7 @@ if [[ "${REPLICATION}" =~ [Tt][Rr][Uu][Ee] && "$WAL_LEVEL" == 'logical' ]]; then
   if [[ -f ${ROOT_CONF}/logical_replication.conf ]];then
     rm $CONF/logical_replication.conf
   fi
-  cat >> ${ROOT_CONF}/streaming_replication.conf <<EOF
+  cat >> ${ROOT_CONF}/logical_replication.conf <<EOF
 wal_level = ${WAL_LEVEL}
 max_wal_senders = ${PG_MAX_WAL_SENDERS}
 wal_keep_size = ${PG_WAL_KEEP_SIZE}
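This fix makes the logical-replication branch append its settings to logical_replication.conf, the same file it just cleaned up, instead of writing them into streaming_replication.conf. A quick hedged check that the generated settings are in effect, assuming the pg-publisher service name and the docker/gis credentials used elsewhere in this scenario:

# Confirm the logical replication settings picked up from the generated conf.
docker-compose exec -T pg-publisher psql -U docker -d gis -tAc "SHOW wal_level;"
docker-compose exec -T pg-publisher psql -U docker -d gis -tAc "SHOW max_wal_senders;"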