最近把一台服务器的宝塔面板一键迁移到新服务器,显示同步完成,结果登录面板一看,网站列表“备份”一列全是“无备份”。
使用文件同步工具把下面两个备份文件目录也同步过来,还是无济于事。宝塔面板版本 v11.5.0
/www/backup/site /www/backup/database
文件在,面板不认,这种就很难受。
这篇文章就把我这次实战踩坑和修复过程一次讲清楚,亲测可用,下面截图是修复后的效果。
![]()
问题根因(重点)
很多人以为宝塔只看备份目录,其实不是。网站列表里的“有(n)”来自数据库里的 backup 表记录,不是纯目录扫描。
迁移后常见问题有两个:
只同步了备份文件,没同步 backup 记录
查错了数据库文件路径
新版宝塔通常是多库拆分,常见是:
sites 在:/www/server/panel/data/db/site.db
backup 在:/www/server/panel/data/db/backup.db
databases 在:/www/server/panel/data/db/database.db
而不是老路径 /www/server/panel/data/default.db。
先做 30 秒自检
先跑这几条命令,确认你是不是同一个问题:
# 1) Count site backup files on disk
find /www/backup/site -type f | wc -l

# 2) Locate which sqlite file holds the "backup" table (usually prints backup.db)
python3 - <<'PY'
import json

# New panels split default.db into per-table files; databases.json maps
# each sqlite file name to the tables it contains.
cfg = json.load(open('/www/server/panel/config/databases.json', 'r', encoding='utf-8'))
for dbname, tbs in cfg.items():
    if 'backup' in tbs:
        print('/www/server/panel/data/db/' + dbname)
        break
PY

# 3) Check whether the backup table is empty (0 rows)
sqlite3 /www/server/panel/data/db/backup.db "select type,count(*) from backup group by type;"
如果你和我一样:备份文件很多,但 backup 记录是 0,那直接执行下面脚本即可。
修复脚本(自动识别库 + 重建网站/数据库备份记录)
功能:扫描 /www/backup/site 和 /www/backup/database,自动写入 backup 表
支持 replace(覆盖重建)和 append(追加不覆盖)模式。
创建脚本:
cat > /root/bt_sync_backup_records_allinone.sh <<'EOF'
#!/usr/bin/env bash
# Rebuild BT-Panel backup records (sites + databases) from the files that
# actually exist under BACKUP_ROOT, writing them into the panel's backup table.
# Usage: bt_sync_backup_records_allinone.sh [PANEL_PATH] [BACKUP_ROOT] [MODE]
set -euo pipefail

PANEL_PATH="${1:-/www/server/panel}"
BACKUP_ROOT="${2:-/www/backup}"
MODE="${3:-replace}" # replace | append

if [[ ! -d "$PANEL_PATH" ]]; then
  echo "ERROR: PANEL_PATH 不存在: $PANEL_PATH"; exit 1
fi
if [[ ! -d "$BACKUP_ROOT" ]]; then
  echo "ERROR: BACKUP_ROOT 不存在: $BACKUP_ROOT"; exit 1
fi
if [[ "$MODE" != "replace" && "$MODE" != "append" ]]; then
  echo "ERROR: MODE 只能是 replace 或 append"; exit 1
fi

python3 - "$PANEL_PATH" "$BACKUP_ROOT" "$MODE" <<'PY'
import re, sys, json, sqlite3, datetime, shutil
from pathlib import Path

panel = Path(sys.argv[1])
backup_root = Path(sys.argv[2])
mode = sys.argv[3]

# databases.json maps each sqlite file under data/db/ to the tables it holds
# (newer panels split the legacy single default.db into per-table files).
cfg = panel / "config" / "databases.json"
db_map = json.loads(cfg.read_text(encoding="utf-8"))

def db_file_for_table(tb: str):
    """Return the Path of the sqlite file containing table *tb*, falling back
    to the legacy default.db; None if neither exists."""
    for dbname, tables in db_map.items():
        if tb in tables:
            p = panel / "data" / "db" / dbname
            if p.exists():
                return p
    old = panel / "data" / "default.db"
    return old if old.exists() else None

backup_db = db_file_for_table("backup")
site_db = db_file_for_table("sites")
dbs_db = db_file_for_table("databases")
if not backup_db or not site_db:
    print("ERROR: 无法定位 backup/sites 数据库文件")
    sys.exit(2)

# Snapshot backup.db before any write so the operation is reversible.
stamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
bak = backup_db.with_name(backup_db.name + f".bak_{stamp}")
shutil.copy2(backup_db, bak)
print("[OK] backup.db 已备份:", bak)

conn_b = sqlite3.connect(str(backup_db)); conn_b.row_factory = sqlite3.Row
conn_s = sqlite3.connect(str(site_db)); conn_s.row_factory = sqlite3.Row
# BUGFIX: only connect when the databases db was actually located.
# The original ran sqlite3.connect(str(dbs_db)) unconditionally, which
# creates a bogus file literally named "None" when dbs_db is None.
if dbs_db:
    conn_d = sqlite3.connect(str(dbs_db)); conn_d.row_factory = sqlite3.Row
    cur_d = conn_d.cursor()
else:
    conn_d = cur_d = None
cur_b = conn_b.cursor()
cur_s = conn_s.cursor()

# name -> id lookup for sites; id set + first-name-wins lookup for databases.
site_id = {r["name"]: int(r["id"]) for r in cur_s.execute("SELECT id,name FROM sites").fetchall()}
db_id_by_name, db_id_set = {}, set()
if cur_d:
    for r in cur_d.execute("SELECT id,name FROM databases").fetchall():
        i, n = int(r["id"]), r["name"]
        db_id_set.add(i)
        if n not in db_id_by_name:
            db_id_by_name[n] = i

ignore_suffix = {".pl", ".log", ".tmp", ".swp"}
ignore_names = {"Thumbs.db", ".DS_Store"}

def ok_file(p: Path):
    """True for regular files that are not junk/marker files."""
    return p.is_file() and p.name not in ignore_names and p.suffix.lower() not in ignore_suffix

def tstr(mtime: float):
    """Format a file mtime the way the panel stores addtime."""
    return datetime.datetime.fromtimestamp(mtime).strftime("%Y-%m-%d %H:%M:%S")

# In append mode, remember existing rows so we never duplicate them.
existing = set()
if mode == "append":
    for r in cur_b.execute("SELECT type,pid,filename FROM backup WHERE type IN (0,1)").fetchall():
        existing.add((int(r["type"]), int(r["pid"]), r["filename"]))

rows, seen = [], set()

def add_row(tp, name, pid, filename, size, addtime):
    """Queue one backup row, de-duplicated on (type, pid, filename)."""
    k = (tp, pid, filename)
    if k in seen:
        return
    if mode == "append" and k in existing:
        return
    seen.add(k)
    rows.append((tp, name, pid, filename, size, addtime))

site_scanned = db_scanned = 0

# Site backups (current layout): /www/backup/site/<site_name>/*
site_root = backup_root / "site"
if site_root.is_dir():
    for sdir in site_root.iterdir():
        if not sdir.is_dir():
            continue
        sid = site_id.get(sdir.name)
        if not sid:
            continue
        for f in sdir.rglob("*"):
            if not ok_file(f):
                continue
            st = f.stat()
            site_scanned += 1
            add_row(0, f.name, sid, str(f.resolve()), int(st.st_size), tstr(st.st_mtime))

# Site backups (legacy layout): /www/backup/<site_name>/*
for child in backup_root.iterdir():
    if not child.is_dir() or child.name in ("site", "database"):
        continue
    sid = site_id.get(child.name)
    if not sid:
        continue
    for f in child.rglob("*"):
        if not ok_file(f):
            continue
        st = f.stat()
        site_scanned += 1
        add_row(0, f.name, sid, str(f.resolve()), int(st.st_size), tstr(st.st_mtime))

# Database backups: /www/backup/database/<type>/crontab_backup
db_root = backup_root / "database"
if db_root.is_dir() and cur_d:
    for tdir in db_root.iterdir():
        if not tdir.is_dir():
            continue
        db_type = tdir.name.lower()
        # Prefer the crontab_backup sub-directory, but also scan the type
        # directory itself; resolve() keys avoid scanning one dir twice.
        candidates = []
        ctb = tdir / "crontab_backup"
        if ctb.is_dir():
            candidates.append(ctb)
        candidates.append(tdir)
        checked = set()
        for base in candidates:
            if not base.is_dir():
                continue
            rk = str(base.resolve())
            if rk in checked:
                continue
            checked.add(rk)
            if db_type == "redis":
                # Redis dumps are flat files whose name starts with the db id.
                for f in base.rglob("*"):
                    if not ok_file(f):
                        continue
                    db_scanned += 1
                    m = re.match(r"^(\d+)_.*redis_data\.rdb$", f.name) or re.match(r"^(\d+)_", f.name)
                    if not m:
                        continue
                    pid = int(m.group(1))
                    if pid not in db_id_set:
                        continue
                    st = f.stat()
                    add_row(1, f.name, pid, str(f.resolve()), int(st.st_size), tstr(st.st_mtime))
            else:
                # Other engines keep one sub-directory per database name.
                for ndir in base.iterdir():
                    if not ndir.is_dir():
                        continue
                    pid = db_id_by_name.get(ndir.name)
                    if not pid:
                        continue
                    for f in ndir.rglob("*"):
                        if not ok_file(f):
                            continue
                        db_scanned += 1
                        st = f.stat()
                        add_row(1, f.name, pid, str(f.resolve()), int(st.st_size), tstr(st.st_mtime))

# Write everything in one transaction; roll back on any failure.
try:
    conn_b.execute("BEGIN")
    if mode == "replace":
        conn_b.execute("DELETE FROM backup WHERE type IN (0,1)")
    conn_b.executemany("INSERT INTO backup(type,name,pid,filename,size,addtime) VALUES(?,?,?,?,?,?)", rows)
    conn_b.commit()
except Exception:
    conn_b.rollback()
    raise
finally:
    conn_b.close(); conn_s.close()
    if conn_d:
        conn_d.close()

print("=== DONE ===")
print("mode:", mode)
print("site_files_scanned:", site_scanned)
print("db_files_scanned:", db_scanned)
print("rows_inserted:", len(rows))
PY

echo
echo "=== 验证 ==="
BACKUP_DB=$(python3 - <<'PY'
import json
panel = "/www/server/panel"
cfg = json.load(open(panel + "/config/databases.json", "r", encoding="utf-8"))
for dbname, tbs in cfg.items():
    if "backup" in tbs:
        print(panel + "/data/db/" + dbname)
        break
PY
)
SITE_DB=$(python3 - <<'PY'
import json
panel = "/www/server/panel"
cfg = json.load(open(panel + "/config/databases.json", "r", encoding="utf-8"))
for dbname, tbs in cfg.items():
    if "sites" in tbs:
        print(panel + "/data/db/" + dbname)
        break
PY
)
sqlite3 "$BACKUP_DB" "SELECT type,COUNT(*) FROM backup WHERE type IN (0,1) GROUP BY type;"
echo "--- 网站备份 Top 20 ---"
sqlite3 "$BACKUP_DB" "ATTACH '$SITE_DB' AS sdb; SELECT sdb.sites.name,COUNT(*) c FROM backup JOIN sdb.sites ON sdb.sites.id=backup.pid WHERE backup.type=0 GROUP BY backup.pid ORDER BY c DESC LIMIT 20;"
EOF
执行:
sed -i 's/\r$//' /root/bt_sync_backup_records_allinone.sh
chmod +x /root/bt_sync_backup_records_allinone.sh
bash /root/bt_sync_backup_records_allinone.sh /www/server/panel /www/backup replace
常见报错避坑
/usr/bin/env: 'bash\r': No such file or directory
说明脚本是 Windows 换行,执行:sed -i 's/\r$//' 脚本名
结果显示 site_files_scanned: 0
大概率是目录下只有空文件夹,没有实际备份文件;先用:find /www/backup/site -type f | wc -l
结果显示 rows_inserted: 0,但备份文件数量明明不为 0
目录名和站点名对不上(例如下划线/域名不一致),需要先统一目录名
总结
这次迁移最大的经验是:
备份迁移 = 文件 + 记录 两条线都要做。
只同步备份目录,不重建 backup 表,面板就会一直显示“无备份”。
如果你也在做宝塔迁移,建议先跑一遍上面的自检,再执行重建脚本,基本就能一次搞定。
未经允许不得转载:前端资源网 - w3h5 » 宝塔面板迁移实战:备份文件已同步,网站列表却显示“无备份”怎么修复?
前端资源网 - w3h5