SCSArchiver CSStudio Configuration Scripts

Hostname
cbmdcs01
Port
5432
Database
archive
Schema
archive
Scripts
    Create Database, Users, and Scheme

    • database ownership provided:
      1. add client_encoding and SEARCH PATH
      2. if database does not exist, create it
      3. call psql with
        • option "-f" loading modified partitioning
        • option "-a" to show all

    # --- Create database, users, and schema --------------------------------
    # Fill in the placeholders before running.  Connects once as PG_Owner.
    export PG_Owner=<database_owner, e.g. "postgres">
    export PG_Archiver=<archiver user, e.g. "archiver">
    export PG_Reporter=<reporter user, e.g. "report">
    export PG_Archiver_pw='<archiver pw, e.g. "$archive">'
    export PG_Reporter_pw='%pwReport%'
    export PGUSER=<schema_owner, e.g. "archiver">
    export PGHOST=%host%
    export PGPORT=5432
    export PGDATABASE=archive
    schema=archive
    outputdir=$(mktemp -d) || exit 1
    outputfile=$(mktemp -p "${outputdir}") || exit 1
    # The here-doc delimiter is deliberately unquoted so the shell expands
    # $PGDATABASE, $schema, ... into the generated SQL.  Consequently the
    # PL/pgSQL dollar-quotes must be written \$\$ — a bare $$ would be
    # replaced by the shell's PID.
    cat > "${outputfile}" << EOF
    ---
    SET client_encoding = 'UTF8';
    --- create database if missing.  CREATE DATABASE cannot run inside a
    --- DO block (it is transaction-bound), so use psql's \gexec idiom.
    SELECT 'CREATE DATABASE $PGDATABASE'
    WHERE NOT EXISTS (SELECT FROM pg_catalog.pg_database WHERE datname = '$PGDATABASE')\gexec
    \connect $PGDATABASE
    --- create roles / users; format() handles identifier/literal quoting
    DO \$\$
    BEGIN
    IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = '$PG_Archiver') THEN
        EXECUTE format('CREATE USER %I WITH PASSWORD %L', '$PG_Archiver', '$PG_Archiver_pw');
    ELSE
        EXECUTE format('ALTER USER %I WITH PASSWORD %L', '$PG_Archiver', '$PG_Archiver_pw');
    END IF;
    IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = '$PG_Reporter') THEN
        EXECUTE format('CREATE USER %I WITH PASSWORD %L', '$PG_Reporter', '$PG_Reporter_pw');
    ELSE
        EXECUTE format('ALTER USER %I WITH PASSWORD %L', '$PG_Reporter', '$PG_Reporter_pw');
    END IF;
    END
    \$\$;
    --- create schema (both statements are idempotent)
    CREATE SCHEMA IF NOT EXISTS $schema;
    GRANT USAGE ON SCHEMA $schema TO $PGUSER;
    --
    SET SEARCH_PATH TO "$schema";
EOF
    # Unset PGDATABASE so the initial connection goes to the owner's default
    # database — the target database may not exist yet.
    cd "${outputdir}" && unset PGDATABASE &&
    psql -U "${PG_Owner:?PG_Owner undefined}" -a -f "${outputfile}" &&
    rm -ir "${outputdir}" && cd -

    Load Scheme

    1. Download postgres_schema.txt from cs-studio@github
    2. Since GSI already created the database, skip DROP DATABASE, CREATE DATABASE
    3. add client_encoding and "SEARCH PATH" in front
    4. call psql with
      • option "-f" loading modified schema
      • option "-a" to show all

    # --- Load schema --------------------------------------------------------
    # Downloads postgres_schema.txt from cs-studio@github and loads it,
    # stripping the parts GSI already handled (database creation).
    export PGUSER=<database_owner>
    export PGHOST=cbmdcs01
    export PGPORT=5432
    export PGDATABASE=archive
    schema=archive
    # NOTE: "$css-address" would expand an (undefined) variable $css followed
    # by the literal "-address"; use underscore names for the URL parts.
    URL="${css_address:?css_address undefined}/raw/${path:?path undefined}/${file:?file undefined}"
    filename=$(basename "${URL}")
    outputdir=$(mktemp -d) || exit 1
    outputfile=$(mktemp -p "${outputdir}") || exit 1
    cat > "${outputfile}" << EOF
    ---
    SET client_encoding = 'UTF8';
    SET SEARCH_PATH TO "$schema";
EOF
    # The database already exists, so strip DROP DATABASE, CREATE DATABASE
    # and \connect from the upstream schema file before loading it.
    cd "${outputdir}" &&
    wget -N --no-check-certificate "${URL}" &&
    grep -v "DROP DATABASE" "${filename}" |
    grep -v "CREATE DATABASE" |
    grep -v -w "\\connect" >> "${outputfile}" &&
    psql -a -f "${outputfile}" &&
    rm -ir "${outputdir}" && cd -

      optionally: to speed up in favour of neglecting consistency, you could drop the FOREIGN KEY constraints of the sample table:

      # --- Optionally drop the FOREIGN KEY constraints of the sample table
      # to speed up inserts, at the cost of referential-integrity checks.
      export PGUSER=<admin_user>
      export PGHOST=cbmdcs01
      export PGPORT=5432
      export PGDATABASE=archive
      schema=archive
      outputdir=$(mktemp -d) || exit 1
      outputfile=$(mktemp -p "${outputdir}") || exit 1
      # NB: the terminating EOF must start the line and carry no trailing
      # whitespace, otherwise the here-doc never terminates.
      cat > "${outputfile}" << EOF
      ---
      SET client_encoding = 'UTF8';
      SET SEARCH_PATH TO "$schema";
      ALTER TABLE sample DROP CONSTRAINT IF EXISTS "sample_status_id_fkey" CASCADE;
      ALTER TABLE sample DROP CONSTRAINT IF EXISTS "sample_severity_id_fkey" CASCADE;
      ALTER TABLE sample DROP CONSTRAINT IF EXISTS "sample_channel_id_fkey" CASCADE;
EOF
      cd "${outputdir}" &&
      psql -a -f "${outputfile}" &&
      rm -ir "${outputdir}" && cd -

    Partitioning

    NOTE: alternatively, if applicable pg_partman Extension to manage partitioned tables by time or ID - not yet tested

    • database ownership provided:
      1. Download postgres_partitioning.txt from cs-studio@github
      2. Since the table sample is already created, optionally including FOREIGN KEY constraints,
        • skip DROP TABLE IF EXISTS sample, though you will run into an error
      3. replace archive. prefixes by "$schema." prefix
      4. add client_encoding and SEARCH PATH in front
      5. call psql with
        • option "-f" loading modified partitioning
        • option "-a" to show all

    # --- Partitioning -------------------------------------------------------
    # Downloads postgres_partitioning.txt, rewrites it for this schema/owner,
    # loads it as PG_Owner and seeds the first partition set.
    export PG_Owner=<database_owner, e.g. "postgres">
    export PGUSER=<schema_owner, e.g. "archiver">
    export PGHOST=cbmdcs01
    export PGPORT=5432
    export PGDATABASE=archive
    schema=archive
    # NOTE: "$css-address" would expand an (undefined) variable $css followed
    # by the literal "-address"; use underscore names for the URL parts.
    URL="${css_address:?css_address undefined}/raw/${path:?path undefined}/postgres_partitioning.txt"
    filename=$(basename "${URL}")
    outputdir=$(mktemp -d) || exit 1
    outputfile=$(mktemp -p "${outputdir}") || exit 1
    cat > "${outputfile}" << EOF
    ---
    SET client_encoding = 'UTF8';
    SET SEARCH_PATH TO "$schema";
EOF
    # Rewrite the upstream file: qualify tables with "$schema.", hand table
    # ownership to $PGUSER, and drop the DROP TABLE of the already-created
    # sample table.
    cd "${outputdir}" &&
    wget -N --no-check-certificate "${URL}" &&
    perl -p -e "s/\sarchive\./ $schema./g;" -e "s/OWNER TO postgres;/OWNER TO \"${PGUSER:?PGUSER undefined}\";/g;" -e "s/DROP TABLE IF EXISTS sample;.*?$//g;" "${filename}" >> "${outputfile}" &&
    printf '\n%s\n' "SELECT archive.sample_update_partitions('2015-01-01'::timestamp, 'archive', '$PGUSER', 'week');" >> "${outputfile}" &&
    psql -U "${PG_Owner:?PG_Owner undefined}" -a -f "${outputfile}" &&
    rm -ir "${outputdir}" && cd -

    Note
    if psql -U $(PG_Owner) does not work, unset PGHOST;

    crontab :
    -- This maintenance function automatically creates partitions according to the
    -- specified interval (e.g. weekly or monthly). The first partition starts at
    -- <begin_time> and ends a day/week/month/year later. This function has to be
    -- called regularly (e.g. daily by cron):
    --
    --   @daily psql  -a -h cbmdcs01 -p 5432 -U <database_owner, e.g. "postgres"> -d archive -c "SELECT archive.sample_update_partitions('2015-01-01'::timestamp, 'archive', '<database_owner, e.g. "postgres">', 'week');"
    --
    -- This function is based on a more generic version by Nicholas Whittier
    -- (http://imperialwicket.com/postgresql-automating-monthly-table-partitions).

    Install a cronjob on any relevant machine using .pgpass (q.v. Psql pgwiki.gsi.de #.pgpass)
    (german) note about the credentials
    ... da Sie vorhaben, "psql" für Ihren CronJob zu nutzen, können Sie alle relevanten PostgreSQL-Connection-Daten in Shell-Variablen hinterlegen. Im pgWiki finden Sie dazu eine Anleitung / Empfehlung von uns, wie so ein PostgreSQL-Environment aussehen könnte. Es gibt auch die Datei ".pgpass, wo Sie Passwörter für Ihre Verbindungen hinterlegen können. 1x hinterlegt, bezieht "psql" standardmäßig aus dieser Datei die passenden Passwörter. Wichtig ist nur, dass CronJob unter dem User ausgeführt wird, bei dem die Datei .pgpass hinterlegt ist.
    Sie können Ihr Environment sogar soweit "vorbereiten", dass Sie sogar den DBNamen, den Usernamen, Port, Server, etc. bei Ihrem Cron-Aufruf nicht mehr mit angeben müssen. Die Environment-Variablen geben "psql" vor, wie und wohin er sich per Standard connecten soll.

    Grants

    1. Expect postgres_schema.txt
    2. Extract GRANT commands and for each user add those GRANTs
    3. add client_encoding and "SEARCH PATH" in front
    4. call psql with
      • option "-f" loading modified schema
      • option "-a" to show all

    # --- Grants -------------------------------------------------------------
    # Extracts the GRANT statements from the upstream schema file and repeats
    # them once per archiver/reporter user.
    export archive=<(list of) archiver_user(s)>
    export report=<(list of) report_user(s)>
    export PGUSER=<database_owner>
    export PGHOST=cbmdcs01
    export PGPORT=5432
    export PGDATABASE=archive
    schema=archive
    # NOTE: "$css-address" would expand an (undefined) variable $css followed
    # by the literal "-address"; use underscore names for the URL parts.
    URL="${css_address:?css_address undefined}/raw/${path:?path undefined}/${file:?file undefined}"
    filename=$(basename "${URL}")
    outputdir=$(mktemp -d) || exit 1
    outputfile=$(mktemp -p "${outputdir}") || exit 1
    cat > "${outputfile}" << EOF
    ---
    SET client_encoding = 'UTF8';
    SET SEARCH_PATH TO "$schema";
EOF
    cd "${outputdir}" &&
    wget -N --no-check-certificate "${URL}" &&
    # $archive / $report are deliberately left unquoted so the shell splits
    # them into the individual user names of the list.
    for item in $archive; do
    perl -ne 'if (/^\bGRANT\b/ ... / TO (\w+);/m) {~s/^-- .*?\n//g; ~ s/\n/ /g; ~s/;\s*/;\n/g; print;}' "${filename}" | perl -ne "if (/TO archive/) {~s/TO archive/TO $item/g;print;}" >> "${outputfile}"
    done
    for item in $report; do
    perl -ne 'if (/^\bGRANT\b/ ... / TO (\w+);/m) {~s/^-- .*?\n//g; ~ s/\n/ /g; ~s/;\s*/;\n/g; print;}' "${filename}" | perl -ne "if (/TO report/) {~s/TO report/TO $item/g;print;}" >> "${outputfile}"
    done
    psql -a -f "${outputfile}" &&
    cd - &&
    rm -ir "${outputdir}" &&
    unset outputdir outputfile archive report


-- PeterZumbruch - 03 Jun 2016
-- PeterZumbruch - 04 Nov 2016

This topic: DaqSlowControl > HadesDaqDocumentation > DetectorControlSystem > SCSArchiverCSStudio > SCSArchiverCSStudioConfigurationScripts
Topic revision: 04 Nov 2016, PeterZumbruch
 
This site is powered by FoswikiCopyright © by the contributing authors. All material on this collaboration platform is the property of the contributing authors.
Ideas, requests, problems regarding Hades Wiki? Send feedback
Imprint (in German)
Privacy Policy (in German)