diff --git a/ctx/conf.go b/ctx/conf.go
index d984d79..2ee7652 100644
--- a/ctx/conf.go
+++ b/ctx/conf.go
@@ -35,6 +35,7 @@ type ConfOpts struct {
 type limitsConf struct {
 	DiskRate *string `conf:"disk_rate"`
 	NetRate  *string `conf:"net_rate"`
+	CPUCount *int    `conf:"cpu_max_count"`
 }
 
 type serverConf struct {
diff --git a/ctx/context.go b/ctx/context.go
index d0009ed..6afda0f 100755
--- a/ctx/context.go
+++ b/ctx/context.go
@@ -25,13 +25,6 @@ import (
 	"github.com/nixys/nxs-backup/modules/metrics"
 )
 
-type rateType string
-
-const (
-	disk rateType = "disk"
-	net  rateType = "net"
-)
-
 // Ctx defines application custom context
 type Ctx struct {
 	Cmd interfaces.Handler
@@ -212,6 +205,9 @@ func appInit(c *Ctx, cfgPath string) (app, error) {
 		if conf.Limits.DiskRate != nil {
 			lim.DiskRate = conf.Limits.DiskRate
 		}
+		if conf.Limits.CPUCount != nil {
+			misc.CPULimit = *conf.Limits.CPUCount
+		}
 	}
 
 	// Init app
diff --git a/misc/generals.go b/misc/generals.go
index 1ae9aa1..5539ed3 100755
--- a/misc/generals.go
+++ b/misc/generals.go
@@ -38,6 +38,7 @@ const (
 )
 
 var DecadesBackupDays = []string{"1", "11", "21"}
+var CPULimit = 0
 
 func AllowedBackupTypesList() []string {
 	return []string{
diff --git a/modules/backend/targz/targz.go b/modules/backend/targz/targz.go
index f7c47ad..3b6bb48 100644
--- a/modules/backend/targz/targz.go
+++ b/modules/backend/targz/targz.go
@@ -2,17 +2,23 @@ package targz
 
 import (
 	"bytes"
-	"github.com/klauspost/pgzip"
 	"io"
 	"os"
 	"os/exec"
 	"path"
 	"regexp"
-
+	"runtime"
+
+	"github.com/klauspost/pgzip"
+
+	"github.com/nixys/nxs-backup/misc"
 	"github.com/nixys/nxs-backup/modules/backend/files"
 )
 
-const regexToIgnoreErr = "^tar:.*(Removing leading|socket ignored|file changed as we read it|Удаляется начальный|сокет проигнорирован|файл изменился во время чтения)"
+const (
+	defaultBlockSize = 1 << 20
+	regexToIgnoreErr = "^tar:.*(Removing leading|socket ignored|file changed as we read it|Удаляется начальный|сокет проигнорирован|файл изменился во время чтения)"
+)
 
 type Error struct {
 	Err error
@@ -34,19 +40,32 @@ func (e Error) Error() string {
 }
 
 func GetGZipFileWriter(filePath string, gZip bool, rateLim int64) (io.WriteCloser, error) {
-	var wc io.WriteCloser
+	var gzw *pgzip.Writer
 
 	lwc, err := files.GetLimitedFileWriter(filePath, rateLim)
 	if err != nil {
 		return nil, err
	}
+
 	if gZip {
-		wc, err = pgzip.NewWriterLevel(lwc, pgzip.BestCompression)
-	} else {
-		wc = lwc
+		if gzw, err = pgzip.NewWriterLevel(lwc, pgzip.BestCompression); err != nil {
+			return nil, err
+		}
+		// NOTE(review): runtime.GOMAXPROCS(n) SETS the process-wide limit and
+		// returns the PREVIOUS value, so passing misc.CPULimit here both
+		// mutated global scheduler state and fed pgzip the stale CPU count.
+		// Query with 0 and apply the configured limit to pgzip directly.
+		procs := misc.CPULimit
+		if procs <= 0 {
+			procs = runtime.GOMAXPROCS(0)
+		}
+		if err = gzw.SetConcurrency(defaultBlockSize, procs); err != nil {
+			return nil, err
+		}
+		lwc = gzw
 	}
 
-	return wc, err
+	return lwc, nil
 }
 
 func GZip(src, dst string, rateLim int64) error {
diff --git a/modules/backup/psql_basebackup/psql_basebackup.go b/modules/backup/psql_basebackup/psql_basebackup.go
index 5ca6c78..e5acd28 100755
--- a/modules/backup/psql_basebackup/psql_basebackup.go
+++ b/modules/backup/psql_basebackup/psql_basebackup.go
@@ -294,6 +294,7 @@ func (j *job) createTmpBackup(logCh chan logger.LogRecord, tmpBackupFile, tgtNam
 		logCh <- logger.Log(j.name, "").Errorf("Unable to make dump `%s`. Error: %s", tgtName, stderr.String())
 		return err
 	}
+	logCh <- logger.Log(j.name, "").Debug("Got psql data. Compressing...")
 
 	if err := targz.Tar(targz.TarOpts{
 		Src: tmpBasebackupPath,