import { Scheduler } from "../../core/scheduler";
import { eq } from "drizzle-orm";
import { db } from "../../db/db";
import { backupSchedulesTable } from "../../db/schema";
import { logger } from "@zerobyte/core/node";
import { volumeService } from "../volumes/volume.service";
import { CleanupDanglingMountsJob } from "../../jobs/cleanup-dangling";
import { VolumeHealthCheckJob } from "../../jobs/healthchecks";
import { RepositoryHealthCheckJob } from "../../jobs/repository-healthchecks";
import { BackupExecutionJob } from "../../jobs/backup-execution";
import { repositoriesService } from "../repositories/repositories.service";
import { notificationsService } from "../notifications/notifications.service";
import { VolumeAutoRemountJob } from "~/server/jobs/auto-remount";
import { cache } from "~/server/utils/cache";
import { withContext } from "~/server/core/request-context";
import { backupsService } from "../backups/backups.service";
import { config } from "~/server/core/config";
import { syncProvisionedResources } from "../provisioning/provisioning";
import { toMessage } from "~/server/utils/errors";

/**
 * Re-save every volume, repository, and notification destination through its
 * service-layer update path, so persisted configuration is rewritten in the
 * current schema shape. Each entity is processed inside its organization's
 * request context; a failure on one entity is logged and does not abort the
 * sweep.
 */
const ensureLatestConfigurationSchema = async () => {
  for (const vol of await db.query.volumesTable.findMany({})) {
    await withContext({ organizationId: vol.organizationId }, async () => {
      try {
        await volumeService.updateVolume(vol.shortId, vol);
      } catch (err) {
        logger.error(`Failed to update volume ${vol.name}: ${err}`);
      }
    });
  }

  for (const repo of await db.query.repositoriesTable.findMany({})) {
    await withContext({ organizationId: repo.organizationId }, async () => {
      try {
        // NOTE(review): empty patch object — presumably the no-op update alone
        // re-normalizes the stored record; confirm against the service.
        await repositoriesService.updateRepository(repo.shortId, {});
      } catch (err) {
        logger.error(`Failed to update repository ${repo.name}: ${err}`);
      }
    });
  }

  for (const dest of await db.query.notificationDestinationsTable.findMany({})) {
    await withContext({ organizationId: dest.organizationId }, async () => {
      try {
        await notificationsService.updateDestination(dest.id, dest);
      } catch (err) {
        logger.error(`Failed to update notification destination ${dest.id}: ${err}`);
      }
    });
  }
};

/**
 * One-time application startup sequence. In order:
 *   1. flush the in-process cache and reset the scheduler,
 *   2. sync provisioned resources (best-effort, logged on failure),
 *   3. migrate persisted configuration to the latest schema,
 *   4. prune orphaned backup schedules,
 *   5. remount volumes that were mounted (or errored with auto-remount on),
 *   6. mark backups interrupted by the restart with a warning status,
 *   7. register the recurring cron jobs.
 */
export const startup = async () => {
  cache.clear();
  await Scheduler.start();
  await Scheduler.clear();

  // Best-effort: a provisioning failure must not block the rest of startup.
  try {
    await syncProvisionedResources(config.provisioningPath);
  } catch (error) {
    logger.error(`Provisioning sync failed: ${toMessage(error)}`);
  }

  await ensureLatestConfigurationSchema();

  // Remove schedules whose backing resources no longer exist; on failure,
  // report zero deletions rather than aborting startup.
  let deletedSchedules = 0;
  try {
    ({ deletedSchedules } = await backupsService.cleanupOrphanedSchedules());
  } catch (err) {
    logger.error(`Failed to cleanup orphaned backup schedules on startup: ${err.message}`);
  }
  if (deletedSchedules > 0) {
    logger.warn(`Removed ${deletedSchedules} orphaned backup schedule(s) during startup`);
  }

  // Remount anything that was mounted before the restart, plus errored
  // volumes that opted into auto-remount.
  const remountCandidates = await db.query.volumesTable.findMany({
    where: {
      OR: [
        { status: "mounted" },
        {
          AND: [{ autoRemount: true }, { status: "error" }],
        },
      ],
    },
  });
  for (const vol of remountCandidates) {
    await withContext({ organizationId: vol.organizationId }, async () => {
      try {
        await volumeService.mountVolume(vol.shortId);
      } catch (err) {
        logger.error(`Error auto-remounting volume ${vol.name} on startup: ${err.message}`);
      }
    });
  }

  // Any schedule still flagged "in_progress" was cut short by the restart;
  // downgrade it to a warning so the UI doesn't show a phantom running backup.
  try {
    await db
      .update(backupSchedulesTable)
      .set({
        lastBackupStatus: "warning",
        lastBackupError: "Zerobyte was restarted during the last scheduled backup",
        updatedAt: Date.now(),
      })
      .where(eq(backupSchedulesTable.lastBackupStatus, "in_progress"));
  } catch (err) {
    logger.error(`Failed to update stuck backup schedules on startup: ${err.message}`);
  }

  // Recurring maintenance jobs, keyed by cron expression.
  const cronJobs = [
    [CleanupDanglingMountsJob, "0 * * * *"],
    [VolumeHealthCheckJob, "*/30 * * * *"],
    [RepositoryHealthCheckJob, "50 12 * * *"],
    [BackupExecutionJob, "* * * * *"],
    [VolumeAutoRemountJob, "*/5 * * * *"],
  ];
  for (const [job, cron] of cronJobs) {
    Scheduler.build(job).schedule(cron);
  }
};