Bacula和多磁带设备,等等


7

Bacula不会同时使用2个磁带设备。(只想看结论的话,请搜索 #-#-# 跳到 TL;DR 部分)

也许有一点背景。

在尝试获得不错的工作备份解决方案(备份> 20TB并不便宜,也不容易)的过程中$dayjob,我们买了一堆东西来使其工作。

首先,有一个Spectra Logic T50e自动转换器,有40个LTO5插槽,该机器人有一对IBM HH5 Ultrium LTO5驱动器,它们通过FibreChannel Arbitrated Loop连接到我们的备份服务器。

有备份服务器。.Dell R715,带有2个16核AMD 62xx CPU和32GB RAM。好吃。该服务器有2张Emulex FCe-12000E卡和一个Intel X520-SR双端口10GE NIC。

我们还购买了Commvault Backup(非NDMP)。

这是真正复杂的地方。

Spectra Logic和Commvault都分别派出了工程师,他们负责建立磁带库和软件。Commvault运行良好,只要控制器运行正常即可。Dell服务器具有Ubuntu 12.04服务器,并运行MediaAgent for CommVault,并将BlueArc NAS作为NFS挂载到几个挂载点,例如/home和中的一些东西/mnt

从NFS挂载点备份时,我们看到的吞吐量约为290GB/hr。考虑到我们要在不到48小时的备份窗口内备份超过20TB的数据,这简直糟透了。BlueArc的额定最大速度为700MB/s(即2460GB/hr),每个磁带驱动器的额定最大写入速度为140MB/s,即每个驱动器492GB/hr(两个驱动器合计则翻倍)。

因此,下一步是使用IOzone对NFS性能进行基准测试。结果证明我们获得了极好的写入性能(跨20个线程),写入速度约为1.5-2.5TB/hr,但读取性能却毫无希望,最高只能达到343GB/hr。因此,让我们假设343GB/hr就是NAS理论上的最大读取性能,那么理论上我们应该能够从 a) CommVault 和 b) 任何其他备份代理那里获得这个性能。

并非如此。Commvault似乎只会给我带来200-250GB/hr吞吐量,并且出于试验目的,我安装了Bacula来查看游戏的运行状态。例如,如果Bacula的性能和速度始终比Commvault更好,那么我们可以说"**$.$ Refunds Plz $.$**"

#-#-#

唉,我发现Bacula遇到了另一个问题。Commvault似乎可以很愉快地用一个线程从挂载点的某个部分读取数据并流式写入一个磁带设备,同时用另一个线程从其他目录读取数据并写入自动换带机中的第二个驱动器。

无论如何,我都无法让Bacula同时挂载并写入两个磁带驱动器。

我尝试过的事情:

  • 在Director、File和Storage守护进程中设置 Maximum Concurrent Jobs = 20
  • Prefer Mounted Volumes = no在工作定义中设置
  • 在自动转换器资源中设置多个设备。

文档似乎是以单驱动器为中心的,我们觉得这有点像把火箭绑在仓鼠上了。大多数Bacula示例配置都用于DDS4驱动器,手动磁带交换以及FreeBSD或IRIX系统。

或许我应该补充一点:即使这不可行,我也不会太在意,但我会感到惊讶。我主要是想用Bacula作为对照,好拿去质问软件供应商,证明他们的产品定价过高 ;)

我在某处读到@KyleBrandt使用现代Tape解决方案做了类似的事情。

配置文件: bacula-dir.conf

#
# Default Bacula Director Configuration file

# Director resource: defines this Director daemon itself (identity, listening
# address/port, working paths, and the console password).
Director {                            # define myself
  Name = backuphost-1-dir
  DIRport = 9101                # where we listen for UA connections
  QueryFile = "/etc/bacula/scripts/query.sql"
  WorkingDirectory = "/var/lib/bacula"
  PidDirectory = "/var/run/bacula"
  Maximum Concurrent Jobs = 20
  Password = "yourekiddingright"         # Console password
  Messages = Daemon
  DirAddress = 0.0.0.0          # listen on all interfaces
  #DirAddress = 127.0.0.1
}

# JobDefs: shared defaults for disk-based backup jobs. Individual Job
# resources inherit every directive here and may override any of them.
JobDefs {
  Name = "DefaultFileJob"
  Type = Backup
  Level = Incremental
  Client = backuphost-1-fd 
  FileSet = "Full Set"
  Schedule = "WeeklyCycle"
  Storage = File                # disk storage defined in the Storage "File" resource
  Messages = Standard
  Pool = File
  Priority = 10
  Write Bootstrap = "/var/lib/bacula/%c.bsr"
}

# JobDefs: shared defaults for tape jobs going to the SpectraLogic library.
JobDefs {
  Name = "DefaultTapeJob"
  Type = Backup
  Level = Incremental
  Client = backuphost-1-fd
  FileSet = "Full Set"
  Schedule = "WeeklyCycle"
  Storage = "SpectraLogic"
  Messages = Standard
  Pool = AllTapes
  Priority = 10
  Write Bootstrap = "/var/lib/bacula/%c.bsr"
  Prefer Mounted Volumes = no   # let the SD pick an idle drive rather than
                                # queueing behind a drive that already has a
                                # volume mounted

}

#
# Define the main nightly save backup job
#   By default, this job will back up to disk in /nonexistant/path/to/file/archive/dir
Job {
  Name = "BackupClient1"
  JobDefs = "DefaultFileJob"    # inherits disk-based defaults
}

# Tape job: backs up the "SpecialVolume" FileSet using the tape defaults.
Job {
  Name = "BackupThisVolume"
  JobDefs = "DefaultTapeJob"
  FileSet = "SpecialVolume"     # overrides the FileSet inherited from JobDefs
}
#Job {
#  Name = "BackupClient2"
#  Client = backuphost-12-fd
#  JobDefs = "DefaultJob"
#}

# Backup the catalog database (after the nightly save)
Job {
  Name = "BackupCatalog"
  JobDefs = "DefaultFileJob"
  Level = Full
  FileSet="Catalog"
  Schedule = "WeeklyCycleAfterBackup"
  # This creates an ASCII copy of the catalog
  # Arguments to make_catalog_backup.pl are:
  #  make_catalog_backup.pl <catalog-name>
  RunBeforeJob = "/etc/bacula/scripts/make_catalog_backup.pl MyCatalog"
  # This deletes the copy of the catalog
  RunAfterJob  = "/etc/bacula/scripts/delete_catalog_backup"
  Write Bootstrap = "/var/lib/bacula/%n.bsr"
  Priority = 11                   # run after main backup
}

#
# Standard Restore template, to be changed by Console program
#  Only one such job is needed for all Jobs/Clients/Storage ...
#
Job {
  Name = "RestoreFiles"
  Type = Restore
  Client=backuphost-1-fd                 
  FileSet="Full Set"                  
  Storage = File                      
  Pool = Default
  Messages = Standard
  Where = /srv/bacula/restore   # restored files land under this prefix
}

# FileSet for the tape job. NOTE (relevant to the multi-drive question):
# Bacula reads a single FileSet's Include list sequentially with one thread
# per job, so one job using this FileSet will only drive one tape device.
# To keep both drives busy, split the paths across multiple Jobs/FileSets.
FileSet {
  Name = "SpecialVolume"
  Include {
    Options {
      signature = MD5
    }
  File = /mnt/SpecialVolume
  }
  Exclude {
    File = /var/lib/bacula
    File = /nonexistant/path/to/file/archive/dir
    File = /proc
    File = /tmp
    File = /.journal
    File = /.fsck
  }
}


# List of files to be backed up
FileSet {
  Name = "Full Set"
  Include {
    Options {
      signature = MD5           # store an MD5 checksum per file in the catalog
    }
    File = /usr/sbin
  }

  Exclude {
    File = /var/lib/bacula
    File = /nonexistant/path/to/file/archive/dir
    File = /proc
    File = /tmp
    File = /.journal
    File = /.fsck
  }
}

# Weekly rotation: monthly Full, weekly Differential, daily Incremental.
Schedule {
  Name = "WeeklyCycle"
  Run = Full 1st sun at 23:05
  Run = Differential 2nd-5th sun at 23:05
  Run = Incremental mon-sat at 23:05
}

# This schedule does the catalog. It starts after the WeeklyCycle
Schedule {
  Name = "WeeklyCycleAfterBackup"
  Run = Full sun-sat at 23:10
}

# This is the backup of the catalog
# (the SQL dump produced by make_catalog_backup.pl in the BackupCatalog job)
FileSet {
  Name = "Catalog"
  Include {
    Options {
      signature = MD5
    }
    File = "/var/lib/bacula/bacula.sql"
  }
}

# Client (File Services) to backup
# The FD runs on the backup server itself (NFS mounts are backed up locally).
Client {
  Name = backuphost-1-fd
  Address = localhost
  FDPort = 9102
  Catalog = MyCatalog
  Password = "surelyyourejoking"          # password for FileDaemon
  File Retention = 30 days            # 30 days
  Job Retention = 6 months            # six months
  AutoPrune = yes                     # Prune expired Jobs/Files
}

#
# Second Client (File Services) to backup
#  You should change Name, Address, and Password before using
#
#Client {
#  Name = backuphost-12-fd                
#  Address = localhost2
#  FDPort = 9102
#  Catalog = MyCatalog
#  Password = "i'mnotjokinganddontcallmeshirley"         # password for FileDaemon 2
#  File Retention = 30 days            # 30 days
#  Job Retention = 6 months            # six months
#  AutoPrune = yes                     # Prune expired Jobs/Files
#}


# Definition of file storage device
# (maps to the Device "FileStorage" resource in bacula-sd.conf)
Storage {
  Name = File
# Do not use "localhost" here    
  Address = localhost                # N.B. Use a fully qualified name here
  SDPort = 9103
  Password = "lalalalala"
  Device = FileStorage
  Media Type = File
}

# Tape storage: the Spectra Logic T50e autochanger with two LTO5 drives.
#
# Two fixes to allow both drives to be written simultaneously:
#  1. "Maximum Concurrent Jobs" in a Director Storage resource defaults to 1,
#     which serializes every tape job onto a single drive regardless of the
#     settings in the daemon resources. Set it to 2 (one job per drive).
#  2. Reference the SD's Autochanger resource ("SpectraLogic" in
#     bacula-sd.conf) via a single Device directive instead of listing the
#     individual drives; the SD then dispatches jobs across Drive-1/Drive-2.
Storage {
  Name = "SpectraLogic"
  Address = localhost                # N.B. use the SD host's FQDN in production
  SDPort = 9103
  Password = "linkedinmakethebestpasswords"
  Device = SpectraLogic              # name of the Autochanger resource in bacula-sd.conf
  Media Type = LTO5
  Autochanger = yes
  Maximum Concurrent Jobs = 2        # one concurrent job per physical drive
}



# Generic catalog service
# Connection settings for the catalog database (dbname "bacula").
Catalog {
  Name = MyCatalog
# Uncomment the following line if you want the dbi driver
# dbdriver = "dbi:sqlite3"; dbaddress = 127.0.0.1; dbport =  
  dbname = "bacula"; DB Address = ""; dbuser = "bacula"; dbpassword = ""
}

# Reasonable message delivery -- send most everything to email address
#  and to the console
Messages {
  Name = Standard

  mailcommand = "/usr/lib/bacula/bsmtp -h localhost -f \"\(Bacula\) \<%r\>\" -s \"Bacula: %t %e of %c %l\" %r"
  operatorcommand = "/usr/lib/bacula/bsmtp -h localhost -f \"\(Bacula\) \<%r\>\" -s \"Bacula: Intervention needed for %j\" %r"
  mail = root@localhost = all, !skipped            
  operator = root@localhost = mount
  console = all, !skipped, !saved
#
# WARNING! the following will create a file that you must cycle from
#          time to time as it will grow indefinitely. However, it will
#          also keep all your messages if they scroll off the console.
#
  append = "/var/lib/bacula/log" = all, !skipped
  catalog = all
}


#
# Message delivery for daemon messages (no job).
Messages {
  Name = Daemon
  mailcommand = "/usr/lib/bacula/bsmtp -h localhost -f \"\(Bacula\) \<%r\>\" -s \"Bacula daemon message\" %r"
  mail = root@localhost = all, !skipped            
  console = all, !skipped, !saved
  append = "/var/lib/bacula/log" = all, !skipped
}

# Default pool definition
Pool {
  Name = Default
  Pool Type = Backup
  Recycle = yes                       # Bacula can automatically recycle Volumes
  AutoPrune = yes                     # Prune expired volumes
  Volume Retention = 365 days         # one year
}

# File Pool definition
Pool {
  Name = File
  Pool Type = Backup
  Recycle = yes                       # Bacula can automatically recycle Volumes
  AutoPrune = yes                     # Prune expired volumes
  Volume Retention = 365 days         # one year
  Maximum Volume Bytes = 50G          # Limit Volume size to something reasonable
  Maximum Volumes = 100               # Limit number of Volumes in Pool
}

# Pool holding the LTO5 tapes in the library; used by DefaultTapeJob.
Pool {
  Name = AllTapes
  Pool Type = Backup
  Recycle = yes
  AutoPrune = yes                     # Prune expired volumes
  Volume Retention = 31 days         # one month
}

# Scratch pool definition
Pool {
  Name = Scratch
  Pool Type = Backup
}

#
# Restricted console used by tray-monitor to get the status of the director
#
Console {
  Name = backuphost-1-mon
  Password = "LastFMalsostorePasswordsLikeThis"
  CommandACL = status, .status        # limited to status queries only
}

bacula-sd.conf

#
# Default Bacula Storage Daemon Configuration file
#

# Storage resource: defines this Storage Daemon itself.
Storage {                             # definition of myself
  Name = backuphost-1-sd
  SDPort = 9103                  # SD listening port (Director and FDs connect here)
  WorkingDirectory = "/var/lib/bacula"
  Pid Directory = "/var/run/bacula"
  Maximum Concurrent Jobs = 20
  SDAddress = 0.0.0.0            # listen on all interfaces
#  SDAddress = 127.0.0.1
}

#
# List Directors who are permitted to contact Storage daemon
#
Director {
  Name = backuphost-1-dir
  Password = "passwordslinplaintext"  # must match the Storage password in bacula-dir.conf
}

#
# Restricted Director, used by tray-monitor to get the
#   status of the storage daemon
#
Director {
  Name = backuphost-1-mon
  Password = "totalinsecurityabound"
  Monitor = yes                       # status queries only
}


# Disk-backed device: volumes are plain files under /srv/bacula/archive.
Device {
  Name = FileStorage
  Media Type = File
  Archive Device = /srv/bacula/archive
  LabelMedia = yes;                   # lets Bacula label unlabeled media
  Random Access = Yes;
  AutomaticMount = yes;               # when device opened, read it
  RemovableMedia = no;
  AlwaysOpen = no;
}


# Autochanger resource: groups the two LTO5 drives behind the Spectra Logic
# robot so tapes can be loaded/unloaded via the mtx-changer wrapper script.
# The Director should reference this resource name ("SpectraLogic") in its
# Storage resource to dispatch jobs across both drives.
Autochanger {
   Name = SpectraLogic
   Device = Drive-1
   Device = Drive-2
   Changer Command = "/etc/bacula/scripts/mtx-changer %c %o %S %a %d"
   Changer Device = /dev/sg4          # SCSI generic node of the changer (robot) arm
}

# First IBM HH5 Ultrium LTO5 drive in the library.
Device {
   Name = Drive-1
   Drive Index = 0                    # drive number as seen by the changer/mtx
   Archive Device = /dev/nst0         # non-rewinding tape node
   Changer Device = /dev/sg4
   Media Type = LTO5
   AutoChanger = yes                  # member of the SpectraLogic Autochanger resource
   RemovableMedia = yes;
   AutomaticMount = yes;
   AlwaysOpen = yes;
   RandomAccess = no;
   LabelMedia = yes

}

# Second IBM HH5 Ultrium LTO5 drive in the library.
Device {
   Name = Drive-2
   Drive Index = 1                    # drive number as seen by the changer/mtx
   Archive Device = /dev/nst1         # non-rewinding tape node
   Changer Device = /dev/sg4
   Media Type = LTO5
   AutoChanger = yes                  # member of the SpectraLogic Autochanger resource
   RemovableMedia = yes;
   AutomaticMount = yes;
   AlwaysOpen = yes;
   RandomAccess = no;
   LabelMedia = yes
}

# 
# Send all messages to the Director, 
# mount messages also are sent to the email address
#
Messages {
  Name = Standard
  director = backuphost-1-dir = all
}

bacula-fd.conf

#
# Default  Bacula File Daemon Configuration file
#

#
# List Directors who are permitted to contact this File daemon
#
# Director allowed to schedule jobs on this File Daemon.
Director {
  Name = backuphost-1-dir
  Password = "hahahahahaha"           # must match the Client password in bacula-dir.conf
}

#
# Restricted Director, used by tray-monitor to get the
#   status of the file daemon
#
Director {
  Name = backuphost-1-mon
  Password = "hohohohohho"
  Monitor = yes                       # status queries only
}

#
# "Global" File daemon configuration specifications
#
FileDaemon {                          # this is me
  Name = backuphost-1-fd
  FDport = 9102                  # where we listen for the director
  WorkingDirectory = /var/lib/bacula
  Pid Directory = /var/run/bacula
  Maximum Concurrent Jobs = 20
  #FDAddress = 127.0.0.1
  FDAddress = 0.0.0.0            # listen on all interfaces
}

# Send all messages except skipped files back to Director
# Send all messages except skipped files back to Director
Messages {
  Name = Standard
  director = backuphost-1-dir = all, !skipped, !restored
}

需要澄清的是-您是否正在尝试将一个作业写入自动转换器的两个插槽?我认为您无法做到这一点,但我确实认为多个作业将写入单独的设备中……
voretaq7 2012年

1
Commvault允许单个作业写入两个插槽。我认为期望其他软件也能做到同样公平。
汤姆·奥康纳

1
如果您要尝试证明性能,那为什么不只在不同设备上同时运行2个作业,又可以证明您想要什么。
AugustBitTony 2012年

1
仅仅因为Commvault将其记录在一项工作中,并不意味着它没有将其分开。例如,Syncsort backup express通过mountpoint进行此操作,通常会在将其跨两个磁带跨越单个作业之前,将所有给定的安装点耗尽到单独的线程(在同一作业中)。我认为有一些很好的理由……不要问我它们是什么:)
SpacemanSpiff 2012年

1
我正在考虑放弃这个问题,而VTC太本地化了。
汤姆·奥康纳

Answers:


1

当您在bacula中设置文件集时,它将逐行逐字读取pathspec并像这样进行备份。

它不会创建两个线程来读取代理中的不同文件路径。

就像@SpacemanSpiff所说的那样,如果您想这样做,前进的道路将是设置不同的作业,每个作业要备份一个。


0

我为您提供了三个技巧:

  • 使用多个存储守护程序。您可以在同一台计算机的不同端口上运行多个存储守护程序。
  • 使用基本作业进行重复数据删除。节省时间和空间。
  • 使用压缩-如果您的磁带驱动器可以很好地进行压缩,但是您可能需要权衡一下它,并尝试使用bacula-fd压缩。这发生在客户端上,因此也节省了带宽,从而节省了CPU时间。
By using our site, you acknowledge that you have read and understand our Cookie Policy and Privacy Policy.
Licensed under cc by-sa 3.0 with attribution required.