--name="Skytools" --html --no-private
HTMLS = londiste.cmdline.html londiste.config.html README.html INSTALL.html \
- londiste.ref.html TODO.html pgq-sql.html pgq-admin.html
+ londiste.ref.html TODO.html pgq-sql.html pgq-admin.html pgq-nodupes.html \
+ walmgr.html
all: man
-man: londiste.1 londiste.5 pgqadm.1
+man: londiste.1 londiste.5 pgqadm.1 walmgr.1
html: $(HTMLS)
install: man
mkdir -p $(DESTDIR)/$(mandir)/man1
mkdir -p $(DESTDIR)/$(mandir)/man5
install -m 644 londiste.1 $(DESTDIR)/$(mandir)/man1
- install -m 644 londiste.5 $(DESTDIR)/$(mandir)/man1
+ install -m 644 londiste.5 $(DESTDIR)/$(mandir)/man5
+ install -m 644 pgqadm.1 $(DESTDIR)/$(mandir)/man1
+ install -m 644 walmgr.1 $(DESTDIR)/$(mandir)/man1
old.wiki.upload:
devupload.sh overview.txt $(wiki)
- devupload.sh TODO.txt $(wiki)/ToDo
+ #devupload.sh TODO.txt $(wiki)/ToDo
#devupload.sh londiste.txt $(wiki)/LondisteUsage
- devupload.sh londiste.ref.txt $(wiki)/LondisteReference
- devupload.sh pgq-sql.txt $(wiki)/PgQdocs
- devupload.sh pgq-nodupes.txt $(wiki)/PgqNoDupes
- devupload.sh walmgr.txt $(wiki)/WalMgr
- devupload.sh pgq-admin.txt $(wiki)/PgqAdm
+ #devupload.sh londiste.ref.txt $(wiki)/LondisteReference
+ #devupload.sh pgq-sql.txt $(wiki)/PgQdocs
+ #devupload.sh pgq-nodupes.txt $(wiki)/PgqNoDupes
+ #devupload.sh walmgr.txt $(wiki)/WalMgr
+ #devupload.sh pgq-admin.txt $(wiki)/PgqAdm
PY_PKGS = skytools pgq londiste
pgqadm.1: pgq-admin.xml
xmlto man $<
+walmgr.1: walmgr.xml
+ xmlto man $<
+
%.xml: %.txt
#asciidoc -b docbook -d manpage -o - $< > $@
asciidoc -b docbook -d manpage -o - $< \
Consumer itself can tag events for retry, but then it must be able to handle them later.

- * If the PgQ queue and event data handling happen in same database,
- the consumer must simply call pgq.finish_batch() inside the event-processing
- transaction.

- * If the event processing happens in different database, the consumer
- must store the batch_id into destination database, inside the same
- transaction as the event processing happens.
+== Only one db ==

- Only after committing it, consumer can call pgq.finish_batch() in queue database
- and commit that.
+If the PgQ queue and the event data handling happen in the same database,
+the consumer must simply call pgq.finish_batch() inside the event-processing
+transaction.

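+A minimal sketch of this pattern (assuming psycopg2; process_event()
+is a hypothetical handler; pgq.next_batch() returns NULL when there
+is nothing to do):
+
+  import psycopg2
+
+  queue_name, consumer_name = 'myqueue', 'myconsumer'   # hypothetical names
+  db = psycopg2.connect("dbname=eventdb")               # queue and event data in one db
+  cur = db.cursor()
+  cur.execute("select pgq.next_batch(%s, %s)", (queue_name, consumer_name))
+  batch_id = cur.fetchone()[0]
+  if batch_id is not None:
+      cur.execute("select * from pgq.get_batch_events(%s)", (batch_id,))
+      for ev in cur.fetchall():
+          process_event(cur, ev)                        # event handling ...
+      cur.execute("select pgq.finish_batch(%s)", (batch_id,))
+      db.commit()                                       # ... and finish_batch() in one transaction
+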
- As the batches come in sequence, there's no need to remember full log of batch_id's,
- it's enough to keep the latest batch_id.
+== Several databases ==

- Then at the start of every batch, consumer can check if the batch_id already
- exists in destination database, and if it does, then just tag batch done,
- without processing.
+If the event processing happens in a different database, the consumer
+must store the batch_id in the destination database, inside the same
+transaction as the event processing (see the sketch below).
+
+- Only after committing it can the consumer call pgq.finish_batch() in the queue database
+  and commit that.
+
+- As the batches come in sequence, there's no need to remember a full log of batch_id's;
+  it's enough to keep the latest batch_id.
+
+- Then at the start of every batch, the consumer can check whether the batch_id already
+  exists in the destination database, and if it does, just tag the batch done
+  without processing it.

With this, there's no need for the consumer to check for already processed
events.

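+Continuing in the same vein, a sketch of the two-database pattern,
+assuming psycopg2 connections queue_db and dest_db, and a hypothetical
+batch_track table in the destination database that keeps the latest
+processed batch_id per consumer:
+
+  cur = dest_db.cursor()
+  cur.execute("select last_batch_id from batch_track where consumer = %s",
+              (consumer_name,))
+  if cur.fetchone()[0] == batch_id:
+      # batch was already processed earlier, just close it in the queue db
+      queue_db.cursor().execute("select pgq.finish_batch(%s)", (batch_id,))
+      queue_db.commit()
+  else:
+      for ev in events:
+          process_event(cur, ev)                 # event handling ...
+      cur.execute("update batch_track set last_batch_id = %s where consumer = %s",
+                  (batch_id, consumer_name))
+      dest_db.commit()                           # ... and batch_id committed together
+      queue_db.cursor().execute("select pgq.finish_batch(%s)", (batch_id,))
+      queue_db.commit()                          # only now finish the batch
+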
-NB: This assumes the event processing is transaction-able - failures
+== Note ==
+
+This assumes the event processing is transactional - failures
will be rolled back. If event processing includes communication with
the world outside the database, e.g. sending email, such handling won't work.
Get list of queues.
-Result: ()
+Result: (queue_name, queue_ntables, queue_cur_table, queue_rotation_period, queue_switch_time, queue_external_ticker, queue_ticker_max_count, queue_ticker_max_lag, queue_ticker_idle_period, ticker_lag)
pgq.get_consumer_info()
pgq.get_consumer_info(queue_name)
Get list of active consumers.
-Result: ()
+Result: (queue_name, consumer_name, lag, last_seen, last_tick, current_batch, next_tick)
pgq.get_batch_info(batch_id)
Get info about batch.
-Result fields: ()
+Result fields: (queue_name, consumer_name, batch_start, batch_end, prev_tick_id, tick_id, lag)
== Notes ==
-Consumer '''must''' be able to process same event several times.
+Consumer *must* be able to process same event several times.
== Example ==
select pgq.event_retry(batch_id, event_id, 60);
select pgq.event_failed(batch_id, event_id, 'Record deleted');
-When all done, notify core about it:
+When all done, notify the database about it:
select pgq.finish_batch(batch_id)
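+
+A consumer loop combining these calls might look like this sketch
+(psycopg2 assumed; handle_event() is hypothetical and returns one of
+'ok', 'retry', 'failed'):
+
+  for ev_id, ev_data in events:
+      status = handle_event(ev_data)
+      if status == 'retry':
+          # put the event back for another attempt after 60 seconds
+          cur.execute("select pgq.event_retry(%s, %s, 60)", (batch_id, ev_id))
+      elif status == 'failed':
+          cur.execute("select pgq.event_failed(%s, %s, 'Record deleted')",
+                      (batch_id, ev_id))
+  cur.execute("select pgq.finish_batch(%s)", (batch_id,))
+  db.commit()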
-#pragma section-numbers 2
-= WalMgr =
+= walmgr(1) =
-[[TableOfContents]]
+== NAME ==
+
+walmgr - tools for managing WAL-based replication for PostgreSQL.
+
+== SYNOPSIS ==
+
+ walmgr.py <config.ini> command
== Step-by-step instructions ==
-=== no-password ssh access from one to other ===
+1. Set up no-password ssh access from master to slave
master$ test -f ~/.ssh/id_dsa.pub || ssh-keygen -t dsa
master$ scp .ssh/id_dsa.pub slave:
slave$ cat id_dsa.pub >> ~/.ssh/authorized_keys
-=== Configure paths ===
+2. Configure paths
master$ edit master.ini
slave$ edit slave.ini
slave$ mkdir data.master logs.full logs.partial
-=== Start archival process ===
+3. Start archival process
master$ ./walmgr.py master.ini setup
-=== Do full backup+restore ===
+4. Do full backup+restore
master$ ./walmgr.py master.ini backup
slave$ ./walmgr.py slave.ini restore
'walmgr.py restore' moves data in place and starts postmaster,
which starts replaying logs as they appear.
-=== In-progress WAL segments can be backup by command: ===
+5. In-progress WAL segments can be backed up with:
master$ ./walmgr.py master.ini sync
-=== If need to stop replay on slave and boot into normal mode, do: ===
+6. To stop replay on the slave and boot into normal mode, do:
slave$ ./walmgr.py slave.ini boot
-== Configuration ==
+== CONFIGURATION ==
=== master.ini ===
-{{{
-[wal-master]
-logfile = master.log
-use_skylog = 0
+ [wal-master]
+ logfile = master.log
+ use_skylog = 0
-master_db = dbname=template1
-master_data = /var/lib/postgresql/8.0/main
-master_config = /etc/postgresql/8.0/main/postgresql.conf
+ master_db = dbname=template1
+ master_data = /var/lib/postgresql/8.0/main
+ master_config = /etc/postgresql/8.0/main/postgresql.conf
-slave = slave:/var/lib/postgresql/walshipping
+ slave = slave:/var/lib/postgresql/walshipping
-completed_wals = %(slave)s/logs.complete
-partial_wals = %(slave)s/logs.partial
-full_backup = %(slave)s/data.master
+ completed_wals = %(slave)s/logs.complete
+ partial_wals = %(slave)s/logs.partial
+ full_backup = %(slave)s/data.master
-# syncdaemon update frequency
-loop_delay = 10.0
-
-}}}
+ # syncdaemon update frequency
+ loop_delay = 10.0
=== slave.ini ===
-{{{
-[wal-slave]
-logfile = slave.log
-use_skylog = 0
+ [wal-slave]
+ logfile = slave.log
+ use_skylog = 0
+
+ slave_data = /var/lib/postgresql/8.0/main
+ slave_stop_cmd = /etc/init.d/postgresql-8.0 stop
+ slave_start_cmd = /etc/init.d/postgresql-8.0 start
-slave_data = /var/lib/postgresql/8.0/main
-slave_stop_cmd = /etc/init.d/postgresql-8.0 stop
-slave_start_cmd = /etc/init.d/postgresql-8.0 start
+ slave = /var/lib/postgresql/walshipping
+ completed_wals = %(slave)s/logs.complete
+ partial_wals = %(slave)s/logs.partial
+ full_backup = %(slave)s/data.master
-slave = /var/lib/postgresql/walshipping
-completed_wals = %(slave)s/logs.complete
-partial_wals = %(slave)s/logs.partial
-full_backup = %(slave)s/data.master
+ keep_old_logs = 0
-keep_old_logs = 0
-}}}