diff --git a/docs/30_Configuration/20_Database.md b/docs/30_Configuration/20_Database.md
index c5214cd1f020554fcd8834c666284da95d46d033..3f5af59ffb777bec2b23445f84cdb2731d298887 100644
--- a/docs/30_Configuration/20_Database.md
+++ b/docs/30_Configuration/20_Database.md
@@ -1,6 +1,6 @@
-# Database backups #
+# Database backups
 
-## Shared settings for all database types ##
+## Shared settings for all database types
 
 There are 2 required values in the jobs/backup.job
 
@@ -109,10 +109,9 @@ dbpassword = '12345678'
 params = '--port={tcp-port} --password={dbpassword} --user={dbuser} --host={tcp-target}'
 
 env = 'export var1="happy meal"; export var2="new"; export var3="year!"'
-
 ```
 
-## Backup sqlite ##
+## Backup sqlite
 
 Sqlite files can be located anywhere in the filesystem. That's why the
 cannot be located with an auto detection. 
@@ -135,7 +134,8 @@ This is the plugins/localdump/profile/sqlite.ini.example:
 type = "sqlite"
 
 # list of files to backup
-# file[] = "/var/www/database/logs.db"
+file[] = "/var/www/database/logs.db"
+file[] = "/var/www/database/another.db"
 
 
 [set]
diff --git a/docs/40_Usage/20_Database.md b/docs/40_Usage/20_Database.md
index 74f89365a467fa90b816f024dbe764d51ba3d274..805da55c8c201a12d10d4b8750c2b22d514892b7 100644
--- a/docs/40_Usage/20_Database.md
+++ b/docs/40_Usage/20_Database.md
@@ -1,7 +1,7 @@
 ## Description
 
 To create backup database dumps without transfer of local directory to a backup target use `sudo ./localdump.sh`.
-Backup dumps will be stored as gzip files into `/var/iml-backup/[profile]`.
+Backup dumps will be stored as gzip files into `/var/iml-backup/<profile>`.
 
 ## Help
 
@@ -66,14 +66,14 @@ sudo ./localdump.sh backup mysql
 
 ## Structure in the backup folder
 
-In the database dump folder is a subdir per service `/var/iml-backup/[profile]`.
+In the database dump folder is a subdir per service `/var/iml-backup/<profile>`.
 
-Below the service folder are files named like the database scheme + `__` + timestamp.
+Below the service folder are files named like `<database scheme>__<timestamp>`.
 
 All dumps are gzip compressed.
 
 At the end of a backup task with localdump.sh older files older than *keep-days* 
-will be deleted from `/var/iml-backup/[service]`.
+will be deleted from `/var/iml-backup/<profile>`.
 
 ### Backup sqlite
 
diff --git a/localdump.sh b/localdump.sh
index 2c4f13441a0272f585f5c92927e00306a74461a1..c1a6df5506278ae204ff1b0724507703f799ce7e 100755
--- a/localdump.sh
+++ b/localdump.sh
@@ -18,6 +18,7 @@
 # 2022-11-04  ah     rename hooks
 # 2024-03-14  ah     v2.0: use profiles for local and remote databases
 # 2024-03-18  ah     fix for db detection from file and cli restore
+# 2024-10-02  ah     reset $rc before calling db plugin
 # ======================================================================
 
 # --- variables:
@@ -486,6 +487,8 @@ EOH
                 _j_runHooks "200-before-db-service"
   
                 h3 "BACKUP [${PROFILENAME}] -> ${SERVICENAME}"
+                rcbak=$rc
+                rc=0
                 . $BACKUP_SCRIPT $mode
   
                 test $rc -gt 0 && j_notify "db ${SERVICENAME}" "$BACKUP_SCRIPT $mode was finished with rc=$rc" $rc
@@ -497,6 +500,8 @@ EOH
                 # ----- post jobs: cleanup
                 cleanup_backup_target
                 show_info_backup_target
+
+                rc=$rc+$rcbak
     
             else
   
diff --git a/plugins/localdump/couchdb2.sh b/plugins/localdump/couchdb2.sh
index 653efcef62f35cf12e663843294d137544bedd91..ee352ce2beb18afd69cf402eaa926c8a4d277106 100755
--- a/plugins/localdump/couchdb2.sh
+++ b/plugins/localdump/couchdb2.sh
@@ -14,22 +14,23 @@
 # ah - Axel Hahn <axel.hahn@iml.unibe.ch>
 # ds - Daniel Schueler <daniel.schueler@iml.unibe.ch>
 #
-# 2019-11-13  .....  v1.0  initial version with backup and restore (single DB)
-# 2020-05-19  .....  v1.1  backup a single or multiple couchdb instances by globbing param
-#                          ./localdump.sh backup couchdb2 demo
-# 2021-10-11  .....  v1.2  added fastmode in restore: no test connect, do not 
-#                          delete DB before create request
-# 2022-01-20         v1.3  fixes with shellcheck
-# 2022-03-17         v1.4  WIP: add lines with prefix __DB__
-# 2022-04-07         v1.5  check archive file, not only seq file
-# 2022-04-14         v1.6  backup security infos (no restore yet)
-# 2022-04-21         v1.7  restore security infos
-# 2022-10-07  ah     v1.8  unescape regex with space to prevent "grep: warning: stray \ before white space"
-# 2023-06-06  ah     v1.9  show a warning if the sequence id was not fetched
-# 2023-06-12  ah     v1.10 skip couchdb dump if no sequence id was detected (=db deleted since fetching list of all dbs)
-# 2023-06-26  ah     v1.11 speed up detection of changed databases
-# 2023-06-27  ah     v1.12 enable tmp file for dblist again (faster); speedup loops in backup
-# 2023-06-28  ah     v1.13 optimize backup move OUTFILE; measure time; cache backed up sequence ids
+# 2019-11-13  .....  v1.0   initial version with backup and restore (single DB)
+# 2020-05-19  .....  v1.1   backup a single or multiple couchdb instances by globbing param
+#                           ./localdump.sh backup couchdb2 demo
+# 2021-10-11  .....  v1.2   added fastmode in restore: no test connect, do not 
+#                           delete DB before create request
+# 2022-01-20         v1.3   fixes with shellcheck
+# 2022-03-17         v1.4   WIP: add lines with prefix __DB__
+# 2022-04-07         v1.5   check archive file, not only seq file
+# 2022-04-14         v1.6   backup security infos (no restore yet)
+# 2022-04-21         v1.7   restore security infos
+# 2022-10-07  ah     v1.8   unescape regex with space to prevent "grep: warning: stray \ before white space"
+# 2023-06-06  ah     v1.9   show a warning if the sequence id was not fetched
+# 2023-06-12  ah     v1.10  skip couchdb dump if no sequence id was detected (=db deleted since fetching list of all dbs)
+# 2023-06-26  ah     v1.11  speed up detection of changed databases
+# 2023-06-27  ah     v1.12  enable tmp file for dblist again (faster); speedup loops in backup
+# 2023-06-28  ah     v1.13  optimize backup move OUTFILE; measure time; cache backed up sequence ids
+# 2024-10-02  ah     v1.14  rename backup and restore function
 # ================================================================================
 
 if [ -z "$BACKUP_TARGETDIR" ]; then
@@ -38,19 +39,6 @@ if [ -z "$BACKUP_TARGETDIR" ]; then
   exit 1
 fi
 
-# --------------------------------------------------------------------------------
-# CONFIG
-# --------------------------------------------------------------------------------
-
-# contains *.config files for each instance
-CFGDIR=~/.iml_backup/couchdb2
-
-# UNUSED
-# dirPythonPackages=/usr/lib/python2.7/site-packages
-
-# now set in localdump.sh
-# ARCHIVE_DIR=$(_j_getvar "${JOBFILE}" dir-dbarchive)/couchdb2
-
 # --------------------------------------------------------------------------------
 # FUNCTIONS
 # --------------------------------------------------------------------------------
@@ -142,7 +130,7 @@ function reqCombined(){
 
 # backup with loop over instances
 # param 1  string  globbing filter to config files
-function doBackup(){
+function couchdb2.backup(){
 
     echo "--- instance: $PROFILENAME"
     if curl --head -X GET "$COUCH_URL" 2>/dev/null | grep "^HTTP.* 200 "; then
@@ -357,7 +345,7 @@ function _doBackupOfSingleInstance(){
 # restore a single backup file; the instance and db name will be detected from file
 # param  string  filename of db dump (full path or relative to BACKUP_TARGETDIR)
 # param  string  optional: target database; default: detect name from import database 
-function restoreByFile(){
+function couchdb2.restore(){
     sMyfile=$1
     dbname=$2
 
@@ -374,7 +362,8 @@ function restoreByFile(){
     #   exit 1
     # fi
 
-    local _sourceDB="$( guessDB $sMyfile | sed 's#.couchdbdump.gz$##' )"
+    local _sourceDB
+    _sourceDB="$( guessDB $sMyfile | sed 's#.couchdbdump.gz$##' )"
     echo "detected source database    : [${_sourceDB}]"
 
     if [ -z "$dbname" ]; then
@@ -387,7 +376,6 @@ function restoreByFile(){
     echo
   
     if [ $bFastMode -eq 0 ]; then
-        echo connect $couchdbhost on port $couchdbport with user $couchdbuser
         curl --head -X GET $COUCH_URL 2>/dev/null | grep "^HTTP.* 200 " >/dev/null
         if [ $? -ne 0 ]; then
             color error
@@ -464,30 +452,31 @@ j_requireBinary  "curl"         1
 j_requireBinary  "couchbackup"  1
 j_requireBinary  "couchrestore" 1
 
-#ls ${dirPythonPackages}/couchdb/tools/dump.py ${dirPythonPackages}/couchdb/tools/load.py >/dev/null && echo "OK: python couchdb tools were found"
-#rc=$rc+$?
-
-
-if [ $rc -eq 0 ]; then
+if [ $rc -ne 0 ]; then
+    rc=1
+    color.echo error "ERROR: Missing a binary. Your Couchdb data cannot be dumped."
+else
     echo
 
-    if [ "$1" = "restore" ]; then
-        echo
-        shift 1
-        restoreByFile $*
+    action=$1
+    shift 1
+    test "$1" = "ALL" && shift 1
+    "${SERVICENAME}.$action" $*
 
-    else
-        shift 1
+    # if [ "$1" = "restore" ]; then
+    #     echo
+    #     shift 1
+    #     restoreByFile $*
 
-        # remove keyword ALL which is used for localdump.sh to loop over all db types
-        test "$1" = "ALL" && shift 1
+    # else
+    #     shift 1
 
-        doBackup $*
-    fi
+    #     # remove keyword ALL which is used for localdump.sh to loop over all db types
+    #     test "$1" = "ALL" && shift 1
+
+    #     doBackup $*
+    # fi
 
-else
-    rc=1
-    color.echo error "ERROR: Your Couchdb data cannot be dumped."
 fi
 
 echo "__DB__$SERVICENAME INFO: $0 $* [$SERVICENAME] final returncode rc=$rc"
diff --git a/plugins/localdump/couchdb2.sh_bak b/plugins/localdump/couchdb2.sh_bak
new file mode 100755
index 0000000000000000000000000000000000000000..47a803c1c5e5fb9b84372dba86398d8e55d59b0e
--- /dev/null
+++ b/plugins/localdump/couchdb2.sh_bak
@@ -0,0 +1,428 @@
+#!/bin/bash
+# ================================================================================
+#
+# LOCALDUMP :: COUCHDB2 - using nodejs tools couchbackup and couchrestore
+# https://github.com/cloudant/couchbackup
+#
+# Backup:
+# - creates gzipped plain text backups (JSON) from each scheme
+# - write sequence id into a text file
+# - store extra file with security infos
+# - latest backup set is written to archive
+#
+# --------------------------------------------------------------------------------
+# ah - Axel Hahn <axel.hahn@iml.unibe.ch>
+# ds - Daniel Schueler <daniel.schueler@iml.unibe.ch>
+#
+# 2019-11-13  .....  v1.0  initial version with backup and restore (single DB)
+# 2020-05-19  .....  v1.1  backup a single or multiple couchdb instances by globbing param
+#                          ./localdump.sh backup couchdb2 demo
+# 2021-10-11  .....  v1.2  added fastmode in restore: no test connect, do not 
+#                          delete DB before create request
+# 2022-01-20         v1.3  fixes with shellcheck
+# 2022-03-17         v1.4  WIP: add lines with prefix __DB__
+# 2022-04-07         v1.5  check archive file, not only seq file
+# 2022-04-14         v1.6  backup security infos (no restore yet)
+# 2022-04-21         v1.7  restore security infos
+# 2022-10-07  ah     v1.8  unescape regex with space to prevent "grep: warning: stray \ before white space"
+# 2023-06-06  ah     v1.9  show a warning if the sequence id was not fetched
+# 2023-06-12  ah     v1.10 skip couchdb dump if no sequence id was detected (=db deleted since fetching list of all dbs)
+# ================================================================================
+
+if [ -z "$BACKUP_TARGETDIR" ]; then
+  echo "ERROR: you cannot start $(basename "$0") directly"
+  rc=$rc+1
+  exit 1
+fi
+
+# --------------------------------------------------------------------------------
+# CONFIG
+# --------------------------------------------------------------------------------
+
+# contains *.config files for each instance
+CFGDIR=~/.iml_backup/couchdb2
+
+# UNUSED
+# dirPythonPackages=/usr/lib/python2.7/site-packages
+
+ARCHIVE_DIR=$(_j_getvar "${JOBFILE}" dir-dbarchive)/couchdb2
+
+# --------------------------------------------------------------------------------
+# FUNCTIONS
+# --------------------------------------------------------------------------------
+
+# make an couch api request
+# param  string  method ... one of GET|POST|DELETE
+# param  string  relative url, i.e. _all_dbs or _stats
+# param  string  optional: data for POST|PUT requests
+function _couchapi(){
+  local method=$1
+  local apiurl=$2
+  # local outfile=$3
+  local data=$3
+
+  sParams=
+  # sParams="$sParams -u ${couchdbuser}:${couchdbpw}"
+  sParams="$sParams -X ${method}"
+  sParams="$sParams ${COUCH_URL}${apiurl}"
+  # if [ ! -z "$outfile" ]; then
+  #   sParams="$sParams -o ${outfile}"
+  # fi
+  if [ -n "$data" ]; then
+    sParams="$sParams -d ${data}"
+  fi
+  curl $sParams 2>/dev/null
+}
+
+function _getDblist(){
+   _couchapi GET _all_dbs | sed 's#\"#\n#g' | grep -Ev "^(\[|\,|\])$" | grep -v _replicator | grep -v _global_changes
+}
+
+# get value update_seq of given couchdb name
+function _getDbSeq(){
+  # _couchapi GET $1 | sed 's#,\"#\n"#g' | egrep -v "^(\[|\,|\])$" | grep update_seq | cut -f 4 -d '"'
+  _couchapi GET "$1" | sed 's#,\"#\n"#g' | grep -Ev "^(\[|\,|\])$" | grep update_seq | cut -f 4 -d '"' | cut -f 1 -d '-'
+}
+
+
+# ---------- CONFIG/ INSTANCES
+
+# get valid configured instances
+function getInstances(){
+ for mycfg in $(ls -1 ${CFGDIR}/*${1}*.config)
+ do
+   if . "$mycfg"; then
+     echo $(basename "${mycfg}" | cut -f 1 -d ".")
+   fi
+ done
+}
+
+
+# load the config of an existing instance
+# see getInstances to get valid names
+# param  string  name of the instance to load
+function loadInstance(){
+  COUCH_URL=
+  if ! . "${CFGDIR}/${1}.config"; then
+    color error
+    echo ERROR: invalid instance: $1 - the config file cannot be sourced
+    color reset
+    exit 1
+  fi
+  if [ -z "${COUCH_URL}" ]; then
+    color error
+    echo "ERROR: invalid instance: $1 - the config file has no COUCH_URL"
+    color reset
+    exit 1
+  fi
+
+}
+
+
+# ---------- BACKUP
+
+# backup with loop over instances
+# param 1  string  globbing filter to config files
+function doBackup(){
+  # for mycfg in `ls -1 ~/.iml_backup/couchdb/*.config`
+  for COUCHDB_INSTANCE in $(getInstances $1)
+  do
+    loadInstance "$COUCHDB_INSTANCE"
+
+      echo "--- instance: $COUCHDB_INSTANCE"
+      if curl --head -X GET "$COUCH_URL" 2>/dev/null | grep "^HTTP.* 200 "; then
+        echo OK, connected.
+        sleep 2
+        _doBackupOfSingleInstance
+
+      else
+        rc=$rc+1
+        color error
+        echo "ERROR: couch DB instance is not available or canot be accessed with these credentials in config file"
+        # repeat curl to show the error message
+        curl -X GET "$COUCH_URL"
+        color reset
+      fi
+
+    echo
+    echo "--- $(date) done."
+    echo
+  done
+}
+
+# make backup of all databases in a couchdb instance
+# global: COUCH_URL
+# global: COUCHDB_INSTANCE
+function _doBackupOfSingleInstance(){
+
+  create_targetdir
+  local ARCHIVE_DIR2="${ARCHIVE_DIR}/${COUCHDB_INSTANCE}/deleted_databases"
+  for _dir in "${BACKUP_TARGETDIR}/${COUCHDB_INSTANCE}" "${ARCHIVE_DIR}/${COUCHDB_INSTANCE}/seq" "${ARCHIVE_DIR}/${COUCHDB_INSTANCE}/security" \
+              "${ARCHIVE_DIR2}"                         "${ARCHIVE_DIR2}/seq"                    "${ARCHIVE_DIR2}/security"
+  do
+    test -d "$_dir" || (echo "creating $_dir" ; mkdir -p "$_dir" )
+  done
+
+  echo
+  echo "    MOVE deleted databases into ${ARCHIVE_DIR2}"
+  echo
+
+  # get a list of current databases
+  dblist=/tmp/couch_list_${COUCHDB_INSTANCE}.txt
+  _getDblist > "$dblist"
+  ls -l "$dblist"
+
+  # detect deleted databases: 
+  for dumpfile in $( find "${ARCHIVE_DIR}/${COUCHDB_INSTANCE}/" -maxdepth 1 -type f -name "*.couchdbdump.gz" )
+  do
+      dbname=$( basename $dumpfile | sed "s#\.couchdbdump\.gz##g" )
+      if ! grep "^${dbname}" "$dblist" >/dev/null; then
+              SEQFILE=${ARCHIVE_DIR}/${COUCHDB_INSTANCE}/seq/__seq__${dbname}
+              SECURITYFILE=${ARCHIVE_DIR}/${COUCHDB_INSTANCE}/security/__security__${dbname}.json
+              echo "DELETED $dbname ... $( ls -l ${dumpfile} | cut -f 5- -d ' ' )"
+              mv "${dumpfile}"     "${ARCHIVE_DIR2}"
+              mv "${SEQFILE}"      "${ARCHIVE_DIR2}/seq/"
+              mv "${SECURITYFILE}" "${ARCHIVE_DIR2}/security/"
+      fi
+  done
+  # done | tee /tmp/couch_archive_${COUCHDB_INSTANCE}.txt
+  echo
+
+  typeset -i iDbTotal=$( cat "$dblist" | wc -l )
+  typeset -i iDb=0
+  typeset -i iDbCount=0
+
+  echo
+  echo "    DUMP databases of instance ${COUCHDB_INSTANCE}: $iDbTotal databases"
+  echo "    TO BACKUP ${BACKUP_TARGETDIR}/${COUCHDB_INSTANCE}"
+  echo "      ARCHIVE ${ARCHIVE_DIR}/${COUCHDB_INSTANCE}"
+  echo
+
+  for dbname in $( cat "$dblist" )
+  do
+    iDb=$iDb+1
+    echo -n "----- $(date) ${COUCHDB_INSTANCE} -- $iDb of $iDbTotal - ${dbname} - "
+    OUTFILE=${BACKUP_TARGETDIR}/${COUCHDB_INSTANCE}/$(get_outfile "${dbname}").couchdbdump
+    ARCHIVFILE=${ARCHIVE_DIR}/${COUCHDB_INSTANCE}/${dbname}.couchdbdump.gz
+    SEQFILE=${ARCHIVE_DIR}/${COUCHDB_INSTANCE}/seq/__seq__${dbname}
+    SECURITYFILE=${ARCHIVE_DIR}/${COUCHDB_INSTANCE}/security/__security__${dbname}.json
+
+    sSequenceCurrent=$(_getDbSeq "${dbname}")
+    sSequenceLast=$(cat "${SEQFILE}" 2>/dev/null | cut -f 1 -d '-')
+#    sSequenceLast=`cat ${SEQFILE} 2>/dev/null | tr -d '\n'`
+
+    # echo
+    # echo "update_seq --+-- current [${sSequenceCurrent}]" 
+    # echo "             +-- backup  [${sSequenceLast}]"
+    if [ "${sSequenceCurrent}" = "${sSequenceLast}" ] && [ -f "$ARCHIVFILE" ]; then
+      echo "SKIP: still on sequence ${sSequenceLast}"
+
+      # add security file for already existing databases 
+      test -f  "${SECURITYFILE}" || (
+        echo "INFO: creating missing security file ${SECURITYFILE}"
+        _couchapi GET "${dbname}/_security" > "${SECURITYFILE}"
+      )
+      
+    else
+      if [ -z "$sSequenceCurrent" ]; then
+        echo "WARNING: unable to fetch current sequence ID - maybe the database was deleted."
+      else
+        echo
+        echo "update_seq --+-- current [${sSequenceCurrent}]" 
+        echo "             +-- backup  [${sSequenceLast}]"
+        echo -n "Need to backup ... "
+        couchbackup --db "${dbname}" >"${OUTFILE}".progress 2>/dev/null && mv "${OUTFILE}".progress "${OUTFILE}"
+        fetchrc
+
+        # $myrc is last returncode - set in fetchrc
+        if [ $myrc -eq 0 ]; then
+          echo -n "gzip ... "
+          compress_file "$OUTFILE"
+          fetchrc
+          if [ $myrc -eq 0 ]; then
+            iDbCount+=1
+            cp "${OUTFILE}"* "${ARCHIVFILE}"                             \
+              && echo "${sSequenceCurrent}">"${SEQFILE}"                 \
+              && _couchapi GET "${dbname}/_security" > "${SECURITYFILE}"
+            ls -l "${ARCHIVFILE}" "${SEQFILE}" "${SECURITYFILE}"
+          fi
+        else
+          echo "ERROR occured while dumping - abort"
+        fi
+        ls -l "$OUTFILE"*
+        echo
+      fi # if [ -z "$sSequenceCurrent" ]; then
+    fi # if [ "${sSequenceCurrent}" = "${sSequenceLast}" ] ...
+  done
+  rm -f "$dblist"
+  echo "__DB__$SERVICENAME backup INFO: ${COUCHDB_INSTANCE} - backed up $iDbCount dbs of $iDbTotal total"
+
+}
+
+# ---------- RESTORE
+#
+# example: 
+#
+# (1)
+# cd /var/iml-archive/couchdb2
+# or
+# cd /var/iml-backup/couchdb2
+#
+# (2)
+# /opt/imlbackup/client/localdump.sh restore couchdb2 measured-preview-couchdbcluster/mydb.couchdbdump.gz axel-01
+#                                    ^       ^        ^                                                   ^
+#                                    |       |        |                                                   |
+#     action: restore ---------------+       |        |                                                   |
+#     database service: couchdb2 ------------+        |                                                   |
+#     filename with instance as relative path --------+                                                   |
+#     optional: target database --------------------------------------------------------------------------+
+#
+
+# restore a single backup file; the instance and db name will be detected from file
+# param  string  filename of db dump (full path or relative to BACKUP_TARGETDIR)
+# param  string  optional: target database; default: detect name from import database 
+function restoreByFile(){
+  sMyfile=$1
+  dbname=$2
+
+  bFastMode=0 # 0 = delete db first and import | 1 = create and import (on empty instance only)
+
+  echo
+  h2 "analyze dump $sMyfile"
+
+  COUCHDB_INSTANCE=$(echo $sMyfile | sed "s#${BACKUP_TARGETDIR}##g" | sed "s#\./##g" | sed "s#^/##g" | cut -f 1 -d "/")
+  echo "detected COUCHDB_INSTANCE   : [${COUCHDB_INSTANCE}]"
+  if [ -z "$COUCHDB_INSTANCE" ]; then
+    echo "ERROR: Name of the instance was not detected."
+    echo "       For couchdb restore you should cd to the ${BACKUP_TARGETDIR} or ${ARCHIVE_DIR}"
+    exit 1
+  fi
+
+  local _sourceDB="$( guessDB $sMyfile | sed 's#.couchdbdump.gz$##' )"
+  echo "detected source database    : [${_sourceDB}]"
+
+  if [ -z "$dbname" ]; then
+    dbname="$_sourceDB"
+    echo "using the same as target    : [${dbname}]"
+  else
+    echo "using db schema from param 2: [${dbname}]"
+  fi
+
+  echo
+
+  loadInstance $COUCHDB_INSTANCE
+  
+  if [ $bFastMode -eq 0 ]; then
+    echo connect $couchdbhost on port $couchdbport with user $couchdbuser
+    curl --head -X GET $COUCH_URL 2>/dev/null | grep "^HTTP.* 200 " >/dev/null
+    if [ $? -ne 0 ]; then
+        color error
+        echo ERROR: couch DB instance is not available
+        curl -X GET $COUCH_URL
+        color reset
+        exit 1
+    fi
+    color ok
+    echo OK
+    color reset
+  fi
+
+  echo
+
+  # _getDblist | grep "^${dbname}$"
+  # if [ $? -eq 0 ]; then
+  #   echo DB exists ... need to drop it first
+  # fi
+
+  if [ $bFastMode -eq 0 ]; then
+    h2 deleting database [$dbname] ...
+    color cmd
+    _couchapi DELETE $dbname
+    fetchrc
+    color reset
+  fi
+
+  h2 creating database [$dbname] ...
+  color cmd
+  _couchapi PUT $dbname
+  fetchrc
+  color reset
+
+  h2 import file ...
+  color cmd
+  zcat ${sMyfile} | couchrestore --db $dbname
+  fetchrc
+  color reset
+
+  h2 add security infos ...
+  # todo: this will fail when restoring from "deleted_databases" folder
+  SECURITYFILE=${ARCHIVE_DIR}/${COUCHDB_INSTANCE}/security/__security__${_sourceDB}.json
+  SECDATA="$( cat $SECURITYFILE )"
+  color cmd
+  echo "add security data: $SECDATA"
+  _couchapi PUT "${dbname}/_security" "$SECDATA"
+  fetchrc
+  color reset
+
+  echo
+
+}
+
+# --------------------------------------------------------------------------------
+# MAIN
+# --------------------------------------------------------------------------------
+
+
+# ----- check requirements
+
+# --- is a couchd here
+# j_requireProcess "couchdb"   1
+
+# --- very specific :-/ ... check available config files
+ls -1 ${CFGDIR}/* >/dev/null 2>&1
+rc=$rc+$?
+
+
+if [ $rc -eq 0 ]; then
+  echo OK: couchdb2 config was found on this system ... checking requirements for backup ...
+
+  j_requireBinary  "curl"         1
+  j_requireBinary  "couchbackup"  1
+  j_requireBinary  "couchrestore" 1
+
+  #ls ${dirPythonPackages}/couchdb/tools/dump.py ${dirPythonPackages}/couchdb/tools/load.py >/dev/null && echo "OK: python couchdb tools were found"
+  #rc=$rc+$?
+
+
+  if [ $rc -eq 0 ]; then
+    echo
+
+    if [ "$1" = "restore" ]; then
+      echo
+      shift 1
+      restoreByFile $*
+
+    else
+      shift 1
+
+      # remove keyword ALL which is used for localdump.sh to loop over all db types
+      test "$1" = "ALL" && shift 1
+
+      doBackup $*
+    fi
+
+  else
+    color error
+    echo ERROR: Couchdb is here but I am missing things for the backup :-/
+    color reset
+  fi
+
+else
+  rc=0
+  echo "__DB__$SERVICENAME SKIP: couchdb2 config does not seem to be here"
+fi
+
+
+echo "__DB__$SERVICENAME INFO: $0 $* [$SERVICENAME] final returncode rc=$rc"
+
+# --------------------------------------------------------------------------------
diff --git a/plugins/localdump/ldap.sh b/plugins/localdump/ldap.sh
index 1d7e09a689b81eff43458878ed3b615b69a39f4f..0343a6a51c64d14aff68c1c04ae65872e01755d1 100755
--- a/plugins/localdump/ldap.sh
+++ b/plugins/localdump/ldap.sh
@@ -14,6 +14,7 @@
 # 2021-12-14  ah     v1.2  detect sbin path to execute slapcat without path
 # 2022-03-17         v1.3  WIP: add lines with prefix __DB__
 # 2022-10-07  ah     v1.4  unescape regex with space to prevent "grep: warning: stray \ before white space"
+# 2024-10-02  ah     v1.5  rename backup and restore function
 # ================================================================================
 
 
@@ -50,7 +51,7 @@ done
 # param  string  DN
 # param  string  name of output file
 # ----------------------------------------------------------------------
-function dump_ldap(){
+function ldap.dump(){
   DN=$1
   DUMPFILE=$2
 
@@ -68,7 +69,7 @@ function dump_ldap(){
 # ----------------------------------------------------------------------
 # run ldap backups
 # ----------------------------------------------------------------------
-function doLdapBackup(){
+function ldap.backup(){
 
   create_targetdir
 
@@ -80,7 +81,7 @@ function doLdapBackup(){
     cfg2=$(echo $cfgname | sed "s#[ =,]#_#g")
     outfile=$(hostname)_ldap_olc_config__$(get_outfile ${cfg2}).ldif
 
-    dump_ldap "$cfgname" "$BACKUP_TARGETDIR/$outfile"
+    ldap.dump "$cfgname" "$BACKUP_TARGETDIR/$outfile"
   done
 
 
@@ -92,7 +93,7 @@ function doLdapBackup(){
     cfg2=`echo $cfgname | sed "s#[ =,]#_#g"`
     outfile=$(hostname)_ldap_data__$(get_outfile ${cfg2}).ldif
 
-    dump_ldap "$cfgname" "$BACKUP_TARGETDIR/$outfile"
+    ldap.dump "$cfgname" "$BACKUP_TARGETDIR/$outfile"
   done
 
   echo
@@ -101,7 +102,7 @@ function doLdapBackup(){
 }
 
 
-function restoreByFile(){
+function ldap.restore(){
   echo "TODO :-/"
   rc=$rc+1
 }
@@ -124,13 +125,11 @@ if [ $rc -ne 0 ]; then
   rc=0
   echo "__DB__$SERVICENAME SKIP: LDAP seems not to be here"
 else
-  if [ "$1" = "restore" ]; then
-    echo
-    restoreByFile "${2}"
-  else
 
-    doLdapBackup
-  fi
+  action=$1
+  shift 1
+  "${SERVICENAME}.$action" $*
+
 fi
 
 echo "__DB__$SERVICENAME INFO: $0 $* [ldap] final returncode rc=$rc"
diff --git a/plugins/localdump/mysql.sh b/plugins/localdump/mysql.sh
index 0dfaebf8d2d8a4067551c24a45976ccdc1bfbb39..e3bc6df3a61f251ca62609da7bb04ed49a51f640 100755
--- a/plugins/localdump/mysql.sh
+++ b/plugins/localdump/mysql.sh
@@ -1,3 +1,4 @@
+#!/bin/bash
 # ================================================================================
 #
 # LOCALDUMP :: MYSQL / MARIADB
@@ -16,6 +17,7 @@
 # 2022-03-21         v2.3  Check if the created dump contains "insert" 
 # 2022-03-24         v2.4  On empty dumps: detect if source db is empty too
 # 2023-10-06         v2.5  mysqldump params can be customized in jobs/env
+# 2024-10-02         v2.6  Detect a connect error before backup; remove unneeded code
 # ================================================================================
 
 if [ -z "$LOCALDUMP_LOADED" ]; then
@@ -28,41 +30,12 @@ fi
 # CONFIG
 # --------------------------------------------------------------------------------
 
-# flag: service was foound locally? One of 0|1
-mysql_FOUND=0
-
 # counters
 typeset -i mysql_COUNT_CREATE=0
 typeset -i mysql_COUNT_DUMPS=0
 typeset -i mysql_COUNT_DB=0
 typeset -i mysql_COUNT_ERRORS=0
 
-SOURCE_DIR=/var/lib/mysql
-
-
-# --------------------------------------------------------------------------------
-# PRIVATE FUNCTIONS
-# --------------------------------------------------------------------------------
-
-# make checks if a service is available on this machine
-# it sets mysql_FOUND as flag
-function mysql._check(){
-
-  j_requireBinary "mysql"             1
-  j_requireBinary "mysqldump"         1
-  # j_requireProcess "mysqld|mariadb"   1
-
-  # if [ ! -d $SOURCE_DIR ]; then
-  #   echo "INFO: directory $SOURCE_DIR doees not exist."
-  #   rc=$rc+1
-  # fi
-
-  # set flag and reset return code
-  test $rc -eq 0 && mysql_FOUND=1
-  test $rc -eq 0 || mysql_COUNT_ERRORS+=1
-  rc=0
-}
-
 # --------------------------------------------------------------------------------
 # PUBLIC FUNCTIONS I :: DB SCHEME - METHODS LOW LEVEL
 # --------------------------------------------------------------------------------
@@ -89,23 +62,25 @@ function mysql.db.dump(){
   local _dbname=$1
   local _dumpfile=$2
 
-  mysqldump ${BACKUP_PARAMS} $LD_MYSQL_DUMP_PARAMS --result-file="$_dumpfile" "$_dbname" 2>&1
-  fetchrc >/dev/null
-
-  if [ $myrc -eq 0 ]; then
-        if ! zgrep -iE "(CREATE|INSERT)" "$_dumpfile" >/dev/null
-        then
-          typeset -i local _iTables
-          _iTables=$( mysql ${BACKUP_PARAMS} --skip-column-names --batch -e "use $_dbname; show tables ;" | wc -l )
-          if [ $_iTables -eq 0 ];
-          then
-            echo -n "EMPTY DATABASE ... "
-          else
-            echo "ERROR: no data - the dump doesn't contain any CREATE or INSERT statement."
-            # force an error
-            false; fetchrc >/dev/null
-          fi
-        fi
+  if mysqldump ${BACKUP_PARAMS} $LD_MYSQL_DUMP_PARAMS --result-file="$_dumpfile" "$_dbname" 2>&1; then
+    fetchrc >/dev/null
+
+    if ! grep -q -iE "(CREATE|INSERT)" "$_dumpfile"
+    then
+      local _iTables
+      typeset -i _iTables
+      _iTables=$( mysql ${BACKUP_PARAMS} --skip-column-names --batch -e "use $_dbname; show tables ;" | wc -l )
+      if [ $_iTables -eq 0 ];
+      then
+        echo -n "EMPTY DATABASE ... "
+      else
+        echo "ERROR: no data - the dump doesn't contain any CREATE or INSERT statement."
+        # force an error
+        false; fetchrc >/dev/null
+      fi
+    fi
+  else
+    fetchrc
   fi
 
   test $myrc -eq 0 && mysql_COUNT_DUMPS+=1
@@ -129,7 +104,7 @@ function mysql.db.import(){
 # show a list of existing databases
 function mysql.db.list(){
   # mysql -Ee "show databases ;" | grep "^Database:" | awk '{ print $2 }'
-  local _result=$( mysql ${BACKUP_PARAMS} -Ee "show databases ;" $BACKUP_PARAMS )
+  local _result=$( mysql ${BACKUP_PARAMS} -Ee "show databases ;" )
   fetchrc >/dev/null
   test $myrc -eq 0 && mysql_COUNT_DB=$( echo "$_result" | grep -c "^Database:" ) 
   test $myrc -eq 0 && echo "$_result" | grep "^Database:" | awk '{ print $2 }'
@@ -140,91 +115,41 @@ function mysql.db.list(){
 # PUBLIC FUNCTIONS II :: HIGH LEVEL
 # --------------------------------------------------------------------------------
 
-# return result is the current service available
-# USAGE: to abort a function if not available:
-# mysql.available || return
-function mysql.available(){
-  local _rc;
-  typeset -i _rc; _rc=(1-$mysql_FOUND)
-  return $_rc
-}
-
-# make checks if the current service is available on this machine
-# param  bool  flag: silence; if any parameter is set it shows no output
-function mysql.check(){
-
-  if [ -n "$1" ]; then
-    mysql._check >/dev/null 2>&1
-  else
-    echo
-    echo Details:
-    mysql._check
-    echo
-  fi
-}
 
 # start database backup of all schemes of this service
 # no prameters
 function mysql.backup(){
-  # abort if service is not available
-  mysql.available || return
 
   local _dbname
   local _outfile
 
   create_targetdir
 
-  for _dbname in $( mysql.db.list )
-  do
-    echo -n "__DB__${SERVICENAME} backup $_dbname ... "
-    _outfile="${BACKUP_TARGETDIR}/$(get_outfile ${_dbname}).sql"
+  # it sets the variable "mysql_COUNT_DB"
+  mysql.db.list >/dev/null 2>&1
 
-    mysql.db.dump "$_dbname" "$_outfile"
-    db._compressDumpfile "$_outfile"
+  if [ $mysql_COUNT_DB -eq 0 ]; then
+    rc=1
+    echo "ERROR: no databases found or a connect error occurred."
+  else
+    echo Found databases: $mysql_COUNT_DB
+    for _dbname in $( mysql.db.list )
+    do
+      echo -n "__DB__${SERVICENAME} backup $_dbname ... "
+      _outfile="${BACKUP_TARGETDIR}/$(get_outfile ${_dbname}).sql"
 
-  done
+      mysql.db.dump "$_dbname" "$_outfile"
+      db._compressDumpfile "$_outfile"
 
+    done
+  fi
 }
 
-# show help
-function mysql.help(){
-  # local _bShow=false
-  # tac "$0" | while read line
-  # do
-  #   if echo $line | grep "^function mysql\.[a-z]" >/dev/null; then
-  #     _bShow = true
-  #   fi
-  #   if echo $line | grep "^# " >/dev/null; then
-  #     _bShow = true
-  #   fi
-  # done
-  cat <<EOHELP
-Help for MYSQL-DOT functions
-
-(1) high level functions
-
-  mysql.available                 silent; exitcode is 0 if mysql is available
-  mysql.check [0|1]               check if mysql is available; shows missing checks
-  mysql.backup                    backup all databases
-  mysql.restore [FILE [DBNAME]]   restore database
-
-(2) functions on database level
-
-  mysql.db.create DBNAME          create database
-  mysql.db.dump DBNAME OUTFILE    dump given database to output file
-  mysql.db.import FILE DBNAME     import file into existing database
-  mysql.db.list                   list existing databases
-
-EOHELP
-}
 # restore database dump file into database
 # param  string  database dump file (gzipped)
 # param  string  optional: database to import; default: database is parsed from file
 function mysql.restore(){
 
-  # abort if service is not available
-  mysql.available || return
-
   local _infile=$1
   local _dbname=$2
 
@@ -258,30 +183,26 @@ function mysql.restore(){
 }
 
 # WIP: show status
-function mysql.status(){
-  h2 "WIP: Status"
-  h3 "Databases (max. 15)"
-  mysql.db.list | head -15
-  h3 "Counters"
-  cat <<EOSTATUS
+# function mysql.status(){
+#   h2 "WIP: Status"
+#   h3 "Databases (max. 15)"
+#   mysql.db.list | head -15
+#   h3 "Counters"
+#   cat <<EOSTATUS
 
-found Dbs: $mysql_COUNT_DB
-created  : $mysql_COUNT_CREATE
-dumped   : $mysql_COUNT_DUMPS
+# found Dbs: $mysql_COUNT_DB
+# created  : $mysql_COUNT_CREATE
+# dumped   : $mysql_COUNT_DUMPS
 
-ERRORS   : $mysql_COUNT_ERRORS
+# ERRORS   : $mysql_COUNT_ERRORS
+
+# EOSTATUS
+# }
 
-EOSTATUS
-}
 # --------------------------------------------------------------------------------
 # MAIN
 # --------------------------------------------------------------------------------
 
-"${SERVICENAME}".check 1
-if ! "${SERVICENAME}".available; then
-  echo "__DB__$SERVICENAME SKIP: service [$SERVICENAME] is not avilable on this machine."
-fi
-
 action=$1
 shift 1
 "${SERVICENAME}.$action" $*
diff --git a/plugins/localdump/pgsql.sh b/plugins/localdump/pgsql.sh
index fd2efe697ef21727f7fff229b8e31d321ec5b5f1..219550a1f462a34f5205ba5e691776607d60c5a9 100755
--- a/plugins/localdump/pgsql.sh
+++ b/plugins/localdump/pgsql.sh
@@ -13,6 +13,7 @@
 # 2022-01-20         v1.2  fixes with shellcheck
 # 2022-03-17         v1.3  WIP: add lines with prefix __DB__
 # 2023-09-20         v1.4  FIX could not change directory to "/root": Permission denied
+# 2024-10-02  ah     v1.5  rename backup and restore function
 # ================================================================================
 
 if [ -z "$BACKUP_TARGETDIR" ]; then
@@ -25,7 +26,6 @@ fi
 # CONFIG
 # --------------------------------------------------------------------------------
 
-  SERVICEFOUND=0
   # unix user of postgres database
   PGUSER=postgres
 
@@ -34,16 +34,8 @@ fi
 # FUNCTION
 # --------------------------------------------------------------------------------
 
-function checkRequirements(){
-  j_requireBinary "pg_dump"   1
-  j_requireProcess "postgres|postmaster" 1
 
-  # set flag and reset return code
-  test $rc -eq 0 && SERVICEFOUND=1
-  rc=0
-}
-
-function doPgsqlBackup(){
+function pgsql.backup(){
 
   create_targetdir
 
@@ -59,10 +51,12 @@ function doPgsqlBackup(){
   do
     echo -n "__DB__${SERVICENAME} backup $DATABASE ... "
     OUTFILE="${BACKUP_TARGETDIR}/$(get_outfile ${DATABASE}).sql"
-    su ${PGUSER} -c "pg_dump ${BACKUP_PARAMS} -Fp ${DATABASE} >$OUTFILE"
-    fetchrc >/dev/null
-
-    db._compressDumpfile "$OUTFILE"
+    if su ${PGUSER} -c "pg_dump ${BACKUP_PARAMS} -Fp ${DATABASE} >$OUTFILE"; then
+      fetchrc >/dev/null
+      db._compressDumpfile "$OUTFILE"
+    else
+      fetchrc
+    fi
 
   done
   cd -
@@ -72,7 +66,7 @@ function doPgsqlBackup(){
 # restore database dump file into database
 # param  string  database dump file (gzipped)
 # param  string  optional: database to import; default: database is parsed from file
-function restoreByFile(){
+function pgsql.restore(){
   sMyfile=$1
   sMyDb=$2
 
@@ -116,31 +110,32 @@ function restoreByFile(){
 
 # ----- requirements
 
-checkRequirements >/dev/null 2>&1
-if [ $SERVICEFOUND -eq 0 ]; then
-  echo "__DB__$SERVICENAME SKIP: service [$SERVICENAME] is not avilable on this machine."
+j_requireBinary "psql" 1
+
+if [ $rc -ne 0 ]; then
+  color.echo error "ERROR: Missing psql binary. Your PostgreSQL data cannot be dumped."
+else
+
+  action=$1
+  shift 1
+  "${SERVICENAME}.$action" $*
+
 fi
 
-case $1 in
-  check)
-    # repeat check ... but show output
-    echo
-    echo Details:
-    checkRequirements
-    echo
-    ;;
-  backup)    
-    test $SERVICEFOUND -eq 1 && doPgsqlBackup
-    ;;
-  restore)
-    shift 1
-    test $SERVICEFOUND -eq 1 && restoreByFile $*
-    ;;
-  *)
-    echo ERROR: wrong syntax: 
-    echo $0 $*
-    exit 1
-  esac
+
+# case $1 in
+#   backup)    
+#     test $SERVICEFOUND -eq 1 && pgsql.backup
+#     ;;
+#   restore)
+#     shift 1
+#     test $SERVICEFOUND -eq 1 && pgsql.restore $*
+#     ;;
+#   *)
+#     echo ERROR: wrong syntax: 
+#     echo $0 $*
+#     exit 1
+#   esac
 
 echo "__DB__$SERVICENAME INFO: $0 $* [$SERVICENAME] final returncode rc=$rc"
 
diff --git a/plugins/localdump/profiles/mysql_localhost_13306.ini b/plugins/localdump/profiles/mysql_localhost_13306.ini
new file mode 100644
index 0000000000000000000000000000000000000000..bb2f511b8ae644062f27c933d386a7ef804ae825
--- /dev/null
+++ b/plugins/localdump/profiles/mysql_localhost_13306.ini
@@ -0,0 +1,44 @@
+# ======================================================================
+#
+# DOCKER MYSQL INSTANCE ON LOCAL EXPOSED PORT
+#
+# ======================================================================
+
+[detect]
+# ----------------------------------------------------------------------
+# what to detect
+# ----------------------------------------------------------------------
+
+binary = 'mysql,mysqldump'
+
+# a running process that must be found
+process = 'mysqld|mariadb'
+
+# a port that must be open on a given host
+tcp-port = 13306
+tcp-target = localhost
+
+# process that opens a port (see netstat -tulpen) - works for local services only
+# "slirp4netns" is docker network stack
+# "rootlesskit" is docker too
+tcp-process = 'rootlesskit'
+
+
+[set]
+# ----------------------------------------------------------------------
+# data to apply if it was found
+# ----------------------------------------------------------------------
+
+su = ''
+dbuser = 'root'
+dbpassword = '12345678'
+
+# not so nice - this information is visible in the process list
+params = '--port={tcp-port} --password={dbpassword} --user={dbuser} --host={tcp-target} --skip-ssl'
+
+# https://dev.mysql.com/doc/refman/8.0/en/environment-variables.html
+# the password should not be stored in MYSQL_PWD either
+# env = 'export var1="happy meal"; export var2="new"; export var3="year!"'
+env = ''
+
+# ----------------------------------------------------------------------
diff --git a/plugins/localdump/profiles/mysql_localhost_docker_13306.ini.example b/plugins/localdump/profiles/mysql_localhost_docker_13306.ini.example
index 3cabdc29f118176270c5e82e3a437ac50e39c485..9963ca282ac30d8cb780ddefac0288e7cd44c14a 100644
--- a/plugins/localdump/profiles/mysql_localhost_docker_13306.ini.example
+++ b/plugins/localdump/profiles/mysql_localhost_docker_13306.ini.example
@@ -31,8 +31,8 @@ su = ''
 dbuser = 'root'
 dbpassword = '12345678'
 
-# unschön - das ist in der Prozessliste
-params = '--port={tcp-port} --password={dbpassword} --user={dbuser} --host={tcp-target}'
+# not so nice - it is visible in the process list
+params = '--port={tcp-port} --password={dbpassword} --user={dbuser} --host={tcp-target} --skip-ssl'
 
 # https://dev.mysql.com/doc/refman/8.0/en/environment-variables.html
 env = ''
diff --git a/plugins/localdump/profiles/sqlite.ini.example b/plugins/localdump/profiles/sqlite.ini.example
index 9a8bd3acf415af6b93394a4fe92e676879a20ff3..6c7db37536a9093414cd4789673c431ee07b3327 100644
--- a/plugins/localdump/profiles/sqlite.ini.example
+++ b/plugins/localdump/profiles/sqlite.ini.example
@@ -11,6 +11,7 @@ type = "sqlite"
 
 # list of files to backup
 # file[] = "/var/www/database/logs.db"
+# file[] = "/var/www/database/another.db"
 
 
 [set]
diff --git a/plugins/localdump/profiles/sqlite_ciserver.ini b/plugins/localdump/profiles/sqlite_ciserver.ini
new file mode 100644
index 0000000000000000000000000000000000000000..30f13c553509223f959db224989e4d744678c995
--- /dev/null
+++ b/plugins/localdump/profiles/sqlite_ciserver.ini
@@ -0,0 +1,26 @@
+# ======================================================================
+#
+# LOCAL SQLITE DATABASES
+#
+# ======================================================================
+
+[detect]
+# ----------------------------------------------------------------------
+# what to detect
+# ----------------------------------------------------------------------
+
+file[] = "/home/axel/data/docker/ciserver/data/imldeployment/data/database/logs.db"
+file[] = "/home/axel/data/docker/ciserver/public_html/valuestore/data/versioncache.db"
+type = "sqlite"
+
+
+[set]
+
+su = ''
+dbuser = ''
+dbpassword = ''
+
+params = ''
+env = ''
+
+# ----------------------------------------------------------------------
diff --git a/plugins/localdump/readme.md b/plugins/localdump/readme.md
index e308d6f5ffb9e4176f17a78b0e64f953fbdf3552..a395c9c3971185d8829ad20533e9433a1f8f47a3 100644
--- a/plugins/localdump/readme.md
+++ b/plugins/localdump/readme.md
@@ -1,5 +1,7 @@
 # Help plugins/localdump/
 
+## 📝 Scripts per database type
+
 Here are database plugins that can dump and restore types of databases.
 They will be sourced by [APPDIR]/localdump.sh and cannot started directly.
 
@@ -10,8 +12,7 @@ They will be sourced by [APPDIR]/localdump.sh and cannot started directly.
 * pgsql.sh (*) - PostgreSql - using pg_dump
 * sqlite.sh - Sqlite sqlite3
 
-(*) If ever possible we use a zero config method. This plugin detects locally installed binaries
-and running processes of a given name to run a database backup or not.
+## ⚙️ Settings
 
 See settings in `[APPDIR]/jobs/backup.job`:
 
@@ -25,9 +26,16 @@ In those are the dumps containing name of database scheme and a timestamp. All d
 
 keep-days contains an integer for the days to keep database dumps locally. Older dumps will be removed.
 
-## Global vars in plugin scripts
+## 📑 Profiles
 
-```
+There are a few ini files in plugins/localdump/profiles/ that autodetect local databases using standard ports.
+You can create your own ini files to detect a remote database or container. 
+See <https://os-docs.iml.unibe.ch/iml-backup/Configuration/Database.html>
+
+
+## ✏️ Global vars in plugin scripts
+
+```text
 BACKUP_BASEDIR      {string}  base directory for db dumps
 BACKUP_DATE         {string}  string with current timestamp; will be part of filename for backups
 BACKUP_KEEP_DAYS    {int}     count of days how long to keep db dumps below $BACKUP_BASEDIR
diff --git a/plugins/localdump/sqlite.sh b/plugins/localdump/sqlite.sh
index b454d0cd582a11fb27ad1bf3dcb389d8633d4f32..a0ab60d99d6ba252a11af10eac4890323261260c 100755
--- a/plugins/localdump/sqlite.sh
+++ b/plugins/localdump/sqlite.sh
@@ -12,6 +12,7 @@
 # 2018-02-09  ah,ds  v1.1  write a .meta file after successful backup
 # 2022-03-17         v1.2  WIP: add lines with prefix __DB__
 # 2024-02-20  ah     v1.3  Handle profiles; restore into existing db file; restore file owner and perms
+# 2024-10-02  ah     v1.4  rename backup and restore function
 # ================================================================================
 
 if [ -z "$BACKUP_TARGETDIR" ]; then
@@ -19,9 +20,6 @@ if [ -z "$BACKUP_TARGETDIR" ]; then
   rc=$rc+1
   exit 1
 fi
-# --------------------------------------------------------------------------------
-# CONFIG
-# --------------------------------------------------------------------------------
 
 # --------------------------------------------------------------------------------
 # FUNCTIONS
@@ -29,7 +27,7 @@ fi
 
 # start multiple sqlite3 backups
 # files are taken from loaded profiles/sqlite*.ini - section [detect] -> files[]
-function doSqliteBackup(){
+function sqlite.backup(){
 
   create_targetdir
 
@@ -46,13 +44,14 @@ function doSqliteBackup(){
       TARGET=$(get_outfile ${DATABASE_FILE})
       TARGET=${BACKUP_TARGETDIR}/$(echo ${TARGET} | sed "s#/#_#g").sql
       META=${TARGET}.gz.meta
-      # echo -n " to $TARGET "
-      sqlite3 "$DATABASE_FILE" .dump >"${TARGET}"
-      fetchrc >/dev/null
-      db._compressDumpfile "${TARGET}" && stat "$DATABASE_FILE" >"${META}"
-
-      ls -l ${TARGET}*
-
+      if sqlite3 "$DATABASE_FILE" .dump >"${TARGET}"; then 
+        fetchrc
+        db._compressDumpfile "${TARGET}" && stat "$DATABASE_FILE" >"${META}"
+        ls -l ${TARGET}*
+      else
+        fetchrc
+        rm -f "${TARGET}"
+      fi  
     fi
   done
 }
@@ -60,7 +59,7 @@ function doSqliteBackup(){
 # restore database dump file into database
 # param  string  database dump file (gzipped)
 # param  string  optional: database to import; default: database is parsed from file
-function restoreByFile(){
+function sqlite.restore(){
   local sMyfile=$1
   local sMyDb=$2
 
@@ -95,17 +94,13 @@ function restoreByFile(){
 j_requireBinary "sqlite3"   1
 
 if [ $rc -ne 0 ]; then
-  rc=1
-  color.echo error "ERROR: Your Sqlite data cannot be dumped."
+  color.echo error "ERROR: Missing sqlite3 binary. Your Sqlite data cannot be dumped."
 else
-  if [ "$1" = "restore" ]; then
-    echo
-    shift 1
-    restoreByFile $*
 
-  else
-    doSqliteBackup
-  fi
+  action=$1
+  shift 1
+  "${SERVICENAME}.$action" $*
+
 fi
 
 echo "__DB__$SERVICENAME INFO: $0 $* [$SERVICENAME] final returncode rc=$rc"
diff --git a/transfer.sh b/transfer.sh
index c313fcfca755b609b431dccdf9a02043c92f90d2..19533c621f85f21abf6068267b8f0c17f34cb5ec 100755
--- a/transfer.sh
+++ b/transfer.sh
@@ -424,7 +424,7 @@ function setAction(){
               echo
 
               t_rcCheckBackup $myrc "${BACKUP_DIR}"
-              test $myrc -ne 0 && j_notify "Dir ${BACKUP_DIR}" "Backup for ${BACKUP_DIR} failed with rc=$myrc. See log for details: $JOB_LOGFILE" 1
+              # test $myrc -ne 0 && j_notify "Dir ${BACKUP_DIR}" "Backup for ${BACKUP_DIR} failed with rc=$myrc. See log for details: $JOB_LOGFILE" 1
 
               _j_runHooks "320-after-folder-transfer" "$myrc"