Initial commit: Disaster recovery CLI tool
A Go-based CLI tool for recovering servers from backups to new cloud VMs. Features: - Multi-cloud support: Exoscale, Cloudscale, Hetzner Cloud - Backup sources: Local filesystem, Hetzner Storage Box - 6-stage restore pipeline with /etc whitelist protection - DNS migration with safety checks and auto-rollback - Dry-run by default, requires --yes to execute - Cloud-init for SSH key injection Packages: - cmd/recover-server: CLI commands (recover, migrate-dns, list, cleanup) - internal/providers: Cloud provider implementations - internal/backup: Backup source implementations - internal/restore: 6-stage restore pipeline - internal/dns: Exoscale DNS management - internal/ui: Prompts, progress, dry-run display - internal/config: Environment and host configuration 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
200
internal/backup/hetzner_storage.go
Normal file
200
internal/backup/hetzner_storage.go
Normal file
@@ -0,0 +1,200 @@
|
||||
package backup
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// HetznerStorageSource implements BackupSource for a Hetzner Storage Box,
// accessed with sftp (listing/validation) and rsync over SSH (sync).
type HetznerStorageSource struct {
	User     string // SSH user, e.g., u480813
	Host     string // storage box hostname, e.g., u480813.your-storagebox.de
	BasePath string // remote base directory holding per-host backups, e.g., /backups
	Port     int    // SSH port (default 23 for sftp, 22 for ssh)
}
|
||||
|
||||
// NewHetznerStorageSource creates a new Hetzner Storage Box source
|
||||
func NewHetznerStorageSource(user, host string) *HetznerStorageSource {
|
||||
if user == "" {
|
||||
user = "u480813"
|
||||
}
|
||||
if host == "" {
|
||||
host = "u480813.your-storagebox.de"
|
||||
}
|
||||
return &HetznerStorageSource{
|
||||
User: user,
|
||||
Host: host,
|
||||
BasePath: "/backups",
|
||||
Port: 23, // Hetzner uses port 23 for SFTP
|
||||
}
|
||||
}
|
||||
|
||||
func (s *HetznerStorageSource) Name() string {
|
||||
return "hetzner"
|
||||
}
|
||||
|
||||
func (s *HetznerStorageSource) sshAddress() string {
|
||||
return fmt.Sprintf("%s@%s", s.User, s.Host)
|
||||
}
|
||||
|
||||
func (s *HetznerStorageSource) List(ctx context.Context, host string) ([]BackupInfo, error) {
|
||||
var backups []BackupInfo
|
||||
|
||||
// Use sftp to list directories
|
||||
path := s.BasePath
|
||||
if host != "" {
|
||||
path = fmt.Sprintf("%s/%s", s.BasePath, host)
|
||||
}
|
||||
|
||||
// Run sftp ls command
|
||||
cmd := exec.CommandContext(ctx, "sftp", "-P", fmt.Sprintf("%d", s.Port), "-o", "BatchMode=yes", s.sshAddress())
|
||||
cmd.Stdin = strings.NewReader(fmt.Sprintf("ls -la %s\nquit\n", path))
|
||||
|
||||
var stdout, stderr bytes.Buffer
|
||||
cmd.Stdout = &stdout
|
||||
cmd.Stderr = &stderr
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
return nil, fmt.Errorf("sftp failed: %w: %s", err, stderr.String())
|
||||
}
|
||||
|
||||
// Parse output
|
||||
lines := strings.Split(stdout.String(), "\n")
|
||||
for _, line := range lines {
|
||||
fields := strings.Fields(line)
|
||||
if len(fields) < 9 {
|
||||
continue
|
||||
}
|
||||
|
||||
// Skip . and ..
|
||||
name := fields[len(fields)-1]
|
||||
if name == "." || name == ".." {
|
||||
continue
|
||||
}
|
||||
|
||||
// Check if it's a directory
|
||||
if !strings.HasPrefix(fields[0], "d") {
|
||||
continue
|
||||
}
|
||||
|
||||
hostName := name
|
||||
if host != "" {
|
||||
// We're looking at subdirs of a specific host
|
||||
continue
|
||||
}
|
||||
|
||||
info := &BackupInfo{
|
||||
Host: hostName,
|
||||
Source: "hetzner",
|
||||
Path: fmt.Sprintf("%s/%s", s.BasePath, hostName),
|
||||
Timestamp: time.Now(), // Would need additional sftp commands for real timestamp
|
||||
}
|
||||
|
||||
// Check for subdirectories (simplified - assume all exist)
|
||||
info.HasRoot = true
|
||||
info.HasOpt = true
|
||||
info.HasEtc = true
|
||||
|
||||
backups = append(backups, *info)
|
||||
}
|
||||
|
||||
return backups, nil
|
||||
}
|
||||
|
||||
func (s *HetznerStorageSource) SyncTo(ctx context.Context, host string, targetSSH string, sshKeyPath string, dirs []string) error {
|
||||
// For Hetzner Storage Box, we need to rsync from storage box to target
|
||||
// This requires the target VM to pull from storage box
|
||||
// OR we rsync storage->local->target (two-hop)
|
||||
|
||||
// Option 1: Direct rsync from storage box (requires storage box SSH access from target)
|
||||
// Option 2: Two-hop: storage -> local staging -> target
|
||||
|
||||
// We'll use Option 2 for reliability (target may not have storage box access)
|
||||
stagingDir := fmt.Sprintf("/tmp/restore-staging-%s", host)
|
||||
if err := os.MkdirAll(stagingDir, 0700); err != nil {
|
||||
return fmt.Errorf("failed to create staging dir: %w", err)
|
||||
}
|
||||
defer os.RemoveAll(stagingDir)
|
||||
|
||||
srcPath := fmt.Sprintf("%s:%s/%s/", s.sshAddress(), s.BasePath, host)
|
||||
|
||||
for _, dir := range dirs {
|
||||
// Step 1: Rsync from Hetzner to local staging
|
||||
localStaging := fmt.Sprintf("%s/%s/", stagingDir, dir)
|
||||
if err := os.MkdirAll(localStaging, 0700); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pullArgs := []string{
|
||||
"-avz",
|
||||
"--progress",
|
||||
"-e", fmt.Sprintf("ssh -p %d -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null", s.Port),
|
||||
fmt.Sprintf("%s%s/", srcPath, dir),
|
||||
localStaging,
|
||||
}
|
||||
|
||||
pullCmd := exec.CommandContext(ctx, "rsync", pullArgs...)
|
||||
pullCmd.Stdout = os.Stdout
|
||||
pullCmd.Stderr = os.Stderr
|
||||
|
||||
if err := pullCmd.Run(); err != nil {
|
||||
// Directory might not exist, continue
|
||||
continue
|
||||
}
|
||||
|
||||
// Step 2: Rsync from local staging to target
|
||||
var targetPath string
|
||||
switch dir {
|
||||
case "root":
|
||||
targetPath = "/root/"
|
||||
case "opt":
|
||||
targetPath = "/opt/"
|
||||
case "etc":
|
||||
targetPath = "/srv/restore/etc/"
|
||||
default:
|
||||
targetPath = "/" + dir + "/"
|
||||
}
|
||||
|
||||
pushArgs := []string{
|
||||
"-avz",
|
||||
"--progress",
|
||||
"-e", fmt.Sprintf("ssh -i %s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null", sshKeyPath),
|
||||
localStaging,
|
||||
fmt.Sprintf("%s:%s", targetSSH, targetPath),
|
||||
}
|
||||
|
||||
pushCmd := exec.CommandContext(ctx, "rsync", pushArgs...)
|
||||
pushCmd.Stdout = os.Stdout
|
||||
pushCmd.Stderr = os.Stderr
|
||||
|
||||
if err := pushCmd.Run(); err != nil {
|
||||
return fmt.Errorf("rsync to target failed for %s: %w", dir, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *HetznerStorageSource) GetPath(host string) string {
|
||||
return fmt.Sprintf("%s@%s:%s/%s", s.User, s.Host, s.BasePath, host)
|
||||
}
|
||||
|
||||
func (s *HetznerStorageSource) Validate(ctx context.Context) error {
|
||||
// Test SFTP connection
|
||||
cmd := exec.CommandContext(ctx, "sftp", "-P", fmt.Sprintf("%d", s.Port), "-o", "BatchMode=yes", "-o", "ConnectTimeout=10", s.sshAddress())
|
||||
cmd.Stdin = strings.NewReader("ls\nquit\n")
|
||||
|
||||
var stderr bytes.Buffer
|
||||
cmd.Stderr = &stderr
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("cannot connect to Hetzner Storage Box: %w: %s", err, stderr.String())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
169
internal/backup/local.go
Normal file
169
internal/backup/local.go
Normal file
@@ -0,0 +1,169 @@
|
||||
package backup
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// LocalSource implements BackupSource for backups stored on the local
// filesystem, laid out as BasePath/<host>/{root,opt,etc}.
type LocalSource struct {
	BasePath string // e.g., /srv/backups
}
|
||||
|
||||
// NewLocalSource creates a new local backup source
|
||||
func NewLocalSource(basePath string) *LocalSource {
|
||||
if basePath == "" {
|
||||
basePath = "/srv/backups"
|
||||
}
|
||||
return &LocalSource{BasePath: basePath}
|
||||
}
|
||||
|
||||
func (s *LocalSource) Name() string {
|
||||
return "local"
|
||||
}
|
||||
|
||||
func (s *LocalSource) List(ctx context.Context, host string) ([]BackupInfo, error) {
|
||||
var backups []BackupInfo
|
||||
|
||||
if host != "" {
|
||||
// List specific host
|
||||
info, err := s.getBackupInfo(host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if info != nil {
|
||||
backups = append(backups, *info)
|
||||
}
|
||||
} else {
|
||||
// List all hosts
|
||||
entries, err := os.ReadDir(s.BasePath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read backup directory: %w", err)
|
||||
}
|
||||
|
||||
for _, entry := range entries {
|
||||
if entry.IsDir() {
|
||||
info, err := s.getBackupInfo(entry.Name())
|
||||
if err != nil {
|
||||
continue // Skip invalid backups
|
||||
}
|
||||
if info != nil {
|
||||
backups = append(backups, *info)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return backups, nil
|
||||
}
|
||||
|
||||
func (s *LocalSource) getBackupInfo(host string) (*BackupInfo, error) {
|
||||
hostPath := filepath.Join(s.BasePath, host)
|
||||
|
||||
stat, err := os.Stat(hostPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info := &BackupInfo{
|
||||
Host: host,
|
||||
Source: "local",
|
||||
Path: hostPath,
|
||||
Timestamp: stat.ModTime(),
|
||||
}
|
||||
|
||||
// Check for subdirectories
|
||||
for _, dir := range SupportedDirs {
|
||||
dirPath := filepath.Join(hostPath, dir)
|
||||
if _, err := os.Stat(dirPath); err == nil {
|
||||
switch dir {
|
||||
case "root":
|
||||
info.HasRoot = true
|
||||
case "opt":
|
||||
info.HasOpt = true
|
||||
case "etc":
|
||||
info.HasEtc = true
|
||||
}
|
||||
|
||||
// Add directory size
|
||||
size, _ := dirSize(dirPath)
|
||||
info.SizeBytes += size
|
||||
}
|
||||
}
|
||||
|
||||
return info, nil
|
||||
}
|
||||
|
||||
func (s *LocalSource) SyncTo(ctx context.Context, host string, targetSSH string, sshKeyPath string, dirs []string) error {
|
||||
hostPath := filepath.Join(s.BasePath, host)
|
||||
|
||||
for _, dir := range dirs {
|
||||
srcPath := filepath.Join(hostPath, dir) + "/"
|
||||
|
||||
// Check source exists
|
||||
if _, err := os.Stat(srcPath); os.IsNotExist(err) {
|
||||
continue // Skip missing directories
|
||||
}
|
||||
|
||||
// Determine target path
|
||||
var targetPath string
|
||||
switch dir {
|
||||
case "root":
|
||||
targetPath = "/root/"
|
||||
case "opt":
|
||||
targetPath = "/opt/"
|
||||
case "etc":
|
||||
targetPath = "/srv/restore/etc/" // Stage /etc, don't overwrite directly
|
||||
default:
|
||||
targetPath = "/" + dir + "/"
|
||||
}
|
||||
|
||||
// Build rsync command
|
||||
args := []string{
|
||||
"-avz",
|
||||
"--progress",
|
||||
"-e", fmt.Sprintf("ssh -i %s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null", sshKeyPath),
|
||||
srcPath,
|
||||
fmt.Sprintf("%s:%s", targetSSH, targetPath),
|
||||
}
|
||||
|
||||
cmd := exec.CommandContext(ctx, "rsync", args...)
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
return fmt.Errorf("rsync failed for %s: %w", dir, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *LocalSource) GetPath(host string) string {
|
||||
return filepath.Join(s.BasePath, host)
|
||||
}
|
||||
|
||||
func (s *LocalSource) Validate(ctx context.Context) error {
|
||||
if _, err := os.Stat(s.BasePath); err != nil {
|
||||
return fmt.Errorf("backup path not accessible: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// dirSize calculates the total size of a directory
|
||||
func dirSize(path string) (int64, error) {
|
||||
var size int64
|
||||
err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !info.IsDir() {
|
||||
size += info.Size()
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return size, err
|
||||
}
|
||||
40
internal/backup/source.go
Normal file
40
internal/backup/source.go
Normal file
@@ -0,0 +1,40 @@
|
||||
package backup
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
)
|
||||
|
||||
// BackupInfo contains metadata about a single host's backup.
type BackupInfo struct {
	Host      string
	Source    string    // "local" or "hetzner"
	Path      string    // Full path to backup
	Timestamp time.Time // Last modified time
	SizeBytes int64     // Total size in bytes of backed-up files
	HasRoot   bool      // Has /root backup
	HasOpt    bool      // Has /opt backup
	HasEtc    bool      // Has /etc backup
}
|
||||
|
||||
// BackupSource defines the interface for backup sources; LocalSource and
// HetznerStorageSource implement it.
type BackupSource interface {
	// Name returns the source name (e.g. "local", "hetzner").
	Name() string

	// List returns available backups for a host (or all hosts if empty).
	List(ctx context.Context, host string) ([]BackupInfo, error)

	// SyncTo syncs backup data to target VM via rsync over SSH.
	// targetSSH is user@host, sshKeyPath is path to private key.
	SyncTo(ctx context.Context, host string, targetSSH string, sshKeyPath string, dirs []string) error

	// GetPath returns the base path for a host's backup.
	GetPath(host string) string

	// Validate checks if backup source is accessible.
	Validate(ctx context.Context) error
}
|
||||
|
||||
// SupportedDirs lists directories that can be restored; each entry maps to a
// HasRoot/HasOpt/HasEtc flag on BackupInfo.
var SupportedDirs = []string{"root", "opt", "etc"}
|
||||
88
internal/config/config.go
Normal file
88
internal/config/config.go
Normal file
@@ -0,0 +1,88 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/joho/godotenv"
|
||||
)
|
||||
|
||||
// Config holds all configuration values read from the environment by Load.
type Config struct {
	// Exoscale API credentials
	ExoscaleAPIKey    string
	ExoscaleAPISecret string

	// Cloudscale API token
	CloudscaleAPIToken string

	// Hetzner Cloud API key
	HetznerAPIKey string

	// Hetzner Storage Box (for backups)
	HetznerStorageBoxUser string
	HetznerStorageBoxHost string

	// Local backup path (default /srv/backups)
	LocalBackupPath string
}
|
||||
|
||||
// cfg is the process-wide configuration populated by Load and read by Get.
var cfg *Config
|
||||
|
||||
// Load reads configuration from .env file
|
||||
func Load(envFile string) error {
|
||||
if envFile != "" {
|
||||
if err := godotenv.Load(envFile); err != nil {
|
||||
// Not fatal - env vars might be set directly
|
||||
fmt.Fprintf(os.Stderr, "Warning: could not load %s: %v\n", envFile, err)
|
||||
}
|
||||
}
|
||||
|
||||
cfg = &Config{
|
||||
ExoscaleAPIKey: os.Getenv("EXOSCALE_API_KEY"),
|
||||
ExoscaleAPISecret: os.Getenv("EXOSCALE_API_SECRET"),
|
||||
CloudscaleAPIToken: os.Getenv("CLOUDSCALE_API_TOKEN"),
|
||||
HetznerAPIKey: os.Getenv("HETZNER_API_KEY"),
|
||||
HetznerStorageBoxUser: getEnvDefault("HETZNER_STORAGEBOX_USER", "u480813"),
|
||||
HetznerStorageBoxHost: getEnvDefault("HETZNER_STORAGEBOX_HOST", "u480813.your-storagebox.de"),
|
||||
LocalBackupPath: getEnvDefault("LOCAL_BACKUP_PATH", "/srv/backups"),
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get returns the current configuration
|
||||
func Get() *Config {
|
||||
if cfg == nil {
|
||||
cfg = &Config{}
|
||||
}
|
||||
return cfg
|
||||
}
|
||||
|
||||
// ValidateProvider checks that the credentials required by the named cloud
// provider ("exoscale", "cloudscale", or "hetzner") are present.
func (c *Config) ValidateProvider(provider string) error {
	switch provider {
	case "exoscale":
		if c.ExoscaleAPIKey == "" || c.ExoscaleAPISecret == "" {
			return fmt.Errorf("EXOSCALE_API_KEY and EXOSCALE_API_SECRET required")
		}
	case "cloudscale":
		if c.CloudscaleAPIToken == "" {
			return fmt.Errorf("CLOUDSCALE_API_TOKEN required")
		}
	case "hetzner":
		if c.HetznerAPIKey == "" {
			return fmt.Errorf("HETZNER_API_KEY required")
		}
	default:
		return fmt.Errorf("unknown provider: %s", provider)
	}
	return nil
}
|
||||
|
||||
// getEnvDefault returns the value of the environment variable key, or
// defaultVal when the variable is unset or empty.
func getEnvDefault(key, defaultVal string) string {
	val := os.Getenv(key)
	if val == "" {
		return defaultVal
	}
	return val
}
|
||||
60
internal/config/hosts.go
Normal file
60
internal/config/hosts.go
Normal file
@@ -0,0 +1,60 @@
|
||||
package config
|
||||
|
||||
// HostConfig defines a recoverable host.
type HostConfig struct {
	Name      string   // Short name (proton, elektron, etc.)
	FQDN      string   // Full domain name
	DNSZone   string   // DNS zone for A/AAAA records
	Services  []string // Docker services to restore
	BackupDir string   // Subdirectory in backup source
}
|
||||
|
||||
// KnownHosts contains all configured hosts
|
||||
var KnownHosts = map[string]HostConfig{
|
||||
"proton": {
|
||||
Name: "proton",
|
||||
FQDN: "proton.obr.sh",
|
||||
DNSZone: "obr.sh",
|
||||
Services: []string{"gitea", "traefik", "portainer"},
|
||||
BackupDir: "proton",
|
||||
},
|
||||
"photon": {
|
||||
Name: "photon",
|
||||
FQDN: "photon.obnh.io",
|
||||
DNSZone: "obnh.io",
|
||||
Services: []string{"gitea", "nginx"},
|
||||
BackupDir: "photon",
|
||||
},
|
||||
"elektron": {
|
||||
Name: "elektron",
|
||||
FQDN: "elektron.obr.sh",
|
||||
DNSZone: "obr.sh",
|
||||
Services: []string{"gitea", "dns", "monitoring"},
|
||||
BackupDir: "elektron",
|
||||
},
|
||||
"fry": {
|
||||
Name: "fry",
|
||||
FQDN: "fry.obr.sh",
|
||||
DNSZone: "obr.sh",
|
||||
Services: []string{"mastodon", "gitea", "traefik"},
|
||||
BackupDir: "fry",
|
||||
},
|
||||
}
|
||||
|
||||
// GetHost returns host config by name
|
||||
func GetHost(name string) (*HostConfig, bool) {
|
||||
h, ok := KnownHosts[name]
|
||||
if !ok {
|
||||
return nil, false
|
||||
}
|
||||
return &h, true
|
||||
}
|
||||
|
||||
// ListHosts returns all known host names
|
||||
func ListHosts() []string {
|
||||
hosts := make([]string, 0, len(KnownHosts))
|
||||
for name := range KnownHosts {
|
||||
hosts = append(hosts, name)
|
||||
}
|
||||
return hosts
|
||||
}
|
||||
178
internal/dns/exoscale.go
Normal file
178
internal/dns/exoscale.go
Normal file
@@ -0,0 +1,178 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
|
||||
v3 "github.com/exoscale/egoscale/v3"
|
||||
"github.com/exoscale/egoscale/v3/credentials"
|
||||
)
|
||||
|
||||
// ExoscaleDNS manages DNS records via the Exoscale v3 API.
type ExoscaleDNS struct {
	client *v3.Client // authenticated egoscale v3 client
}
|
||||
|
||||
// DNSRecord represents a single DNS record within a zone.
type DNSRecord struct {
	ID      string // provider-assigned record UUID
	Type    string // A, AAAA, CNAME, etc.
	Name    string // subdomain or @ for apex
	Content string // IP or target
	TTL     int64
}
|
||||
|
||||
// NewExoscaleDNS creates a new Exoscale DNS client
|
||||
func NewExoscaleDNS(apiKey, apiSecret string) (*ExoscaleDNS, error) {
|
||||
creds := credentials.NewStaticCredentials(apiKey, apiSecret)
|
||||
client, err := v3.NewClient(creds)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create Exoscale client: %w", err)
|
||||
}
|
||||
|
||||
return &ExoscaleDNS{client: client}, nil
|
||||
}
|
||||
|
||||
// GetRecord gets a specific DNS record
|
||||
func (d *ExoscaleDNS) GetRecord(ctx context.Context, zone, recordType, name string) (*DNSRecord, error) {
|
||||
domains, err := d.client.ListDNSDomains(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list domains: %w", err)
|
||||
}
|
||||
|
||||
var domainID v3.UUID
|
||||
for _, domain := range domains.DNSDomains {
|
||||
if domain.UnicodeName == zone {
|
||||
domainID = domain.ID
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if domainID == "" {
|
||||
return nil, fmt.Errorf("zone %s not found", zone)
|
||||
}
|
||||
|
||||
records, err := d.client.ListDNSDomainRecords(ctx, domainID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list records: %w", err)
|
||||
}
|
||||
|
||||
for _, rec := range records.DNSDomainRecords {
|
||||
if rec.Type == v3.DNSDomainRecordType(recordType) && rec.Name == name {
|
||||
return &DNSRecord{
|
||||
ID: string(rec.ID),
|
||||
Type: string(rec.Type),
|
||||
Name: rec.Name,
|
||||
Content: rec.Content,
|
||||
TTL: rec.Ttl,
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("record %s.%s not found", name, zone)
|
||||
}
|
||||
|
||||
// UpdateRecord updates a DNS record
|
||||
func (d *ExoscaleDNS) UpdateRecord(ctx context.Context, zone string, record *DNSRecord) error {
|
||||
domains, err := d.client.ListDNSDomains(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to list domains: %w", err)
|
||||
}
|
||||
|
||||
var domainID v3.UUID
|
||||
for _, domain := range domains.DNSDomains {
|
||||
if domain.UnicodeName == zone {
|
||||
domainID = domain.ID
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if domainID == "" {
|
||||
return fmt.Errorf("zone %s not found", zone)
|
||||
}
|
||||
|
||||
op, err := d.client.UpdateDNSDomainRecord(ctx, domainID, v3.UUID(record.ID), v3.UpdateDNSDomainRecordRequest{
|
||||
Content: record.Content,
|
||||
Ttl: record.TTL,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update record: %w", err)
|
||||
}
|
||||
|
||||
_, err = d.client.Wait(ctx, op, v3.OperationStateSuccess)
|
||||
if err != nil {
|
||||
return fmt.Errorf("update operation failed: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ListRecords lists all records in a zone
|
||||
func (d *ExoscaleDNS) ListRecords(ctx context.Context, zone string) ([]DNSRecord, error) {
|
||||
domains, err := d.client.ListDNSDomains(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list domains: %w", err)
|
||||
}
|
||||
|
||||
var domainID v3.UUID
|
||||
for _, domain := range domains.DNSDomains {
|
||||
if domain.UnicodeName == zone {
|
||||
domainID = domain.ID
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if domainID == "" {
|
||||
return nil, fmt.Errorf("zone %s not found", zone)
|
||||
}
|
||||
|
||||
records, err := d.client.ListDNSDomainRecords(ctx, domainID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list records: %w", err)
|
||||
}
|
||||
|
||||
var result []DNSRecord
|
||||
for _, rec := range records.DNSDomainRecords {
|
||||
result = append(result, DNSRecord{
|
||||
ID: string(rec.ID),
|
||||
Type: string(rec.Type),
|
||||
Name: rec.Name,
|
||||
Content: rec.Content,
|
||||
TTL: rec.Ttl,
|
||||
})
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// ListZones lists all DNS zones managed in Exoscale
|
||||
func (d *ExoscaleDNS) ListZones(ctx context.Context) ([]string, error) {
|
||||
domains, err := d.client.ListDNSDomains(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list domains: %w", err)
|
||||
}
|
||||
|
||||
var zones []string
|
||||
for _, domain := range domains.DNSDomains {
|
||||
zones = append(zones, domain.UnicodeName)
|
||||
}
|
||||
|
||||
return zones, nil
|
||||
}
|
||||
|
||||
// ResolveCurrentIP resolves hostname and returns its first IPv4 address,
// or an error when resolution fails or only non-IPv4 addresses come back.
func ResolveCurrentIP(hostname string) (string, error) {
	ips, err := net.LookupIP(hostname)
	if err != nil {
		return "", err
	}

	for _, ip := range ips {
		ipv4 := ip.To4()
		if ipv4 == nil {
			continue
		}
		return ipv4.String(), nil
	}

	return "", fmt.Errorf("no IPv4 address found for %s", hostname)
}
|
||||
135
internal/dns/health.go
Normal file
135
internal/dns/health.go
Normal file
@@ -0,0 +1,135 @@
|
||||
package dns
|
||||
|
||||
import (
	"context"
	"crypto/tls"
	"errors"
	"fmt"
	"net"
	"net/http"
	"time"
)
|
||||
|
||||
// HealthResult contains the outcome of the SSH and HTTPS health checks.
type HealthResult struct {
	SSHReady   bool  // true when the SSH check passed
	HTTPSReady bool  // true when the HTTPS check passed
	SSHError   error // nil when SSHReady
	HTTPSError error // nil when HTTPSReady
}
|
||||
|
||||
// HealthChecker performs reachability checks against a VM's IP address.
type HealthChecker struct {
	SSHTimeout   time.Duration // dial timeout for the SSH TCP check
	HTTPSTimeout time.Duration // request timeout for the HTTPS check
}
|
||||
|
||||
// NewHealthChecker creates a new health checker
|
||||
func NewHealthChecker() *HealthChecker {
|
||||
return &HealthChecker{
|
||||
SSHTimeout: 10 * time.Second,
|
||||
HTTPSTimeout: 10 * time.Second,
|
||||
}
|
||||
}
|
||||
|
||||
// CheckSSH checks if SSH is accessible
|
||||
func (h *HealthChecker) CheckSSH(ctx context.Context, ip string, port int) error {
|
||||
if port == 0 {
|
||||
port = 22
|
||||
}
|
||||
|
||||
address := fmt.Sprintf("%s:%d", ip, port)
|
||||
|
||||
dialer := &net.Dialer{
|
||||
Timeout: h.SSHTimeout,
|
||||
}
|
||||
|
||||
conn, err := dialer.DialContext(ctx, "tcp", address)
|
||||
if err != nil {
|
||||
return fmt.Errorf("SSH not accessible: %w", err)
|
||||
}
|
||||
conn.Close()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CheckHTTPS checks if HTTPS is accessible
|
||||
func (h *HealthChecker) CheckHTTPS(ctx context.Context, ip string, port int) error {
|
||||
if port == 0 {
|
||||
port = 443
|
||||
}
|
||||
|
||||
// Use IP directly with insecure TLS (we just want to verify the port is open)
|
||||
client := &http.Client{
|
||||
Timeout: h.HTTPSTimeout,
|
||||
Transport: &http.Transport{
|
||||
TLSClientConfig: &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
url := fmt.Sprintf("https://%s:%d/", ip, port)
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
// Connection refused or timeout is a failure
|
||||
// But TLS errors mean the port is open (which is what we want)
|
||||
if _, ok := err.(*tls.CertificateVerificationError); ok {
|
||||
return nil // Port is open, TLS is working
|
||||
}
|
||||
|
||||
// For other TLS errors, the port is still open
|
||||
if netErr, ok := err.(net.Error); ok && !netErr.Timeout() {
|
||||
// Non-timeout network error might still mean port is open
|
||||
// Check if we can at least connect
|
||||
conn, connErr := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", ip, port), h.HTTPSTimeout)
|
||||
if connErr == nil {
|
||||
conn.Close()
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("HTTPS not accessible: %w", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CheckAll performs all health checks
|
||||
func (h *HealthChecker) CheckAll(ctx context.Context, ip string) *HealthResult {
|
||||
result := &HealthResult{}
|
||||
|
||||
result.SSHError = h.CheckSSH(ctx, ip, 22)
|
||||
result.SSHReady = result.SSHError == nil
|
||||
|
||||
result.HTTPSError = h.CheckHTTPS(ctx, ip, 443)
|
||||
result.HTTPSReady = result.HTTPSError == nil
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// WaitForReady waits for all health checks to pass
|
||||
func (h *HealthChecker) WaitForReady(ctx context.Context, ip string, timeout time.Duration) error {
|
||||
deadline := time.Now().Add(timeout)
|
||||
|
||||
for time.Now().Before(deadline) {
|
||||
result := h.CheckAll(ctx, ip)
|
||||
if result.SSHReady {
|
||||
return nil // SSH is the minimum requirement
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-time.After(5 * time.Second):
|
||||
// Continue checking
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("timeout waiting for VM to be ready")
|
||||
}
|
||||
189
internal/dns/migration.go
Normal file
189
internal/dns/migration.go
Normal file
@@ -0,0 +1,189 @@
|
||||
package dns
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// MigrationRequest contains DNS migration parameters.
type MigrationRequest struct {
	Hostname string // Full hostname (e.g., proton.obr.sh)
	OldIP    string // Expected current IP; migration aborts on mismatch
	NewIP    string // New IP to point to
	Zone     string // DNS zone (e.g., obr.sh); derived from Hostname when empty
	DryRun   bool   // when true, no DNS change is made
}
|
||||
|
||||
// MigrationResult contains the result of a DNS migration.
type MigrationResult struct {
	Success    bool       // record updated and verified (or dry-run)
	Message    string     // human-readable summary
	OldRecord  *DNSRecord // record as it was before the update
	NewRecord  *DNSRecord // record written during the update
	RolledBack bool       // verification failed and the old record was restored
}
|
||||
|
||||
// Migrator handles DNS migration with safety checks and automatic rollback.
type Migrator struct {
	dns     *ExoscaleDNS   // DNS API client used to read and update records
	health  *HealthChecker // pre-flight reachability checks on the new VM
	verbose bool           // print progress to stdout
}
|
||||
|
||||
// NewMigrator creates a new DNS migrator
|
||||
func NewMigrator(dns *ExoscaleDNS, verbose bool) *Migrator {
|
||||
return &Migrator{
|
||||
dns: dns,
|
||||
health: NewHealthChecker(),
|
||||
verbose: verbose,
|
||||
}
|
||||
}
|
||||
|
||||
// Migrate performs DNS migration with safety checks
|
||||
func (m *Migrator) Migrate(ctx context.Context, req MigrationRequest) (*MigrationResult, error) {
|
||||
result := &MigrationResult{}
|
||||
|
||||
// Parse hostname to get subdomain and zone
|
||||
subdomain, zone := parseHostname(req.Hostname, req.Zone)
|
||||
|
||||
if m.verbose {
|
||||
fmt.Printf("DNS Migration: %s -> %s\n", req.OldIP, req.NewIP)
|
||||
fmt.Printf(" Zone: %s, Subdomain: %s\n", zone, subdomain)
|
||||
}
|
||||
|
||||
// Step 1: Verify new VM is accessible
|
||||
if m.verbose {
|
||||
fmt.Println("\n=== Pre-flight checks ===")
|
||||
}
|
||||
|
||||
healthResult := m.health.CheckAll(ctx, req.NewIP)
|
||||
if !healthResult.SSHReady {
|
||||
return nil, fmt.Errorf("new VM not accessible on SSH (port 22): %s", req.NewIP)
|
||||
}
|
||||
if m.verbose {
|
||||
fmt.Printf(" ✓ SSH accessible on %s\n", req.NewIP)
|
||||
}
|
||||
|
||||
// Step 2: Get current DNS record
|
||||
record, err := m.dns.GetRecord(ctx, zone, "A", subdomain)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get current DNS record: %w", err)
|
||||
}
|
||||
result.OldRecord = record
|
||||
|
||||
// Step 3: Verify current DNS matches expected old IP
|
||||
if record.Content != req.OldIP {
|
||||
return nil, fmt.Errorf("DNS mismatch: expected %s, found %s", req.OldIP, record.Content)
|
||||
}
|
||||
if m.verbose {
|
||||
fmt.Printf(" ✓ Current DNS points to expected IP: %s\n", req.OldIP)
|
||||
}
|
||||
|
||||
// Step 4: Verify via live DNS resolution
|
||||
resolvedIP, err := ResolveCurrentIP(req.Hostname)
|
||||
if err != nil {
|
||||
if m.verbose {
|
||||
fmt.Printf(" ⚠ Could not verify live DNS: %v\n", err)
|
||||
}
|
||||
} else if resolvedIP != req.OldIP {
|
||||
return nil, fmt.Errorf("live DNS mismatch: expected %s, resolved %s", req.OldIP, resolvedIP)
|
||||
} else if m.verbose {
|
||||
fmt.Printf(" ✓ Live DNS resolution verified: %s\n", resolvedIP)
|
||||
}
|
||||
|
||||
// Step 5: Perform migration (or dry-run)
|
||||
if req.DryRun {
|
||||
result.Success = true
|
||||
result.Message = fmt.Sprintf("[DRY RUN] Would update %s.%s: %s -> %s",
|
||||
subdomain, zone, req.OldIP, req.NewIP)
|
||||
return result, nil
|
||||
}
|
||||
|
||||
if m.verbose {
|
||||
fmt.Println("\n=== Updating DNS ===")
|
||||
}
|
||||
|
||||
// Update the record
|
||||
newRecord := &DNSRecord{
|
||||
ID: record.ID,
|
||||
Type: record.Type,
|
||||
Name: record.Name,
|
||||
Content: req.NewIP,
|
||||
TTL: record.TTL,
|
||||
}
|
||||
|
||||
if err := m.dns.UpdateRecord(ctx, zone, newRecord); err != nil {
|
||||
return nil, fmt.Errorf("failed to update DNS: %w", err)
|
||||
}
|
||||
|
||||
result.NewRecord = newRecord
|
||||
|
||||
if m.verbose {
|
||||
fmt.Printf(" ✓ DNS updated: %s -> %s\n", req.OldIP, req.NewIP)
|
||||
}
|
||||
|
||||
// Step 6: Verify the update
|
||||
verifyRecord, err := m.dns.GetRecord(ctx, zone, "A", subdomain)
|
||||
if err != nil || verifyRecord.Content != req.NewIP {
|
||||
// Rollback
|
||||
if m.verbose {
|
||||
fmt.Println(" ✗ Verification failed, rolling back...")
|
||||
}
|
||||
|
||||
rollbackRecord := &DNSRecord{
|
||||
ID: record.ID,
|
||||
Type: record.Type,
|
||||
Name: record.Name,
|
||||
Content: req.OldIP,
|
||||
TTL: record.TTL,
|
||||
}
|
||||
|
||||
if rollbackErr := m.dns.UpdateRecord(ctx, zone, rollbackRecord); rollbackErr != nil {
|
||||
return nil, fmt.Errorf("CRITICAL: rollback failed: %w (original error: %v)", rollbackErr, err)
|
||||
}
|
||||
|
||||
result.RolledBack = true
|
||||
return nil, fmt.Errorf("DNS update verification failed, rolled back: %w", err)
|
||||
}
|
||||
|
||||
result.Success = true
|
||||
result.Message = fmt.Sprintf("DNS migration complete: %s now points to %s", req.Hostname, req.NewIP)
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// parseHostname splits a fully-qualified hostname into its subdomain and
// DNS zone. Resolution order:
//  1. exact or suffix match against defaultZone (if non-empty),
//  2. suffix match against the hard-coded list of known zones,
//  3. fallback: the last two labels are treated as the zone.
//
// A hostname equal to the zone itself yields the apex subdomain "@".
// A single-label hostname with no matching zone returns ("", "").
//
// Fix over the original: a hostname exactly equal to a multi-label
// defaultZone (e.g. "b.zone.tld" with defaultZone "b.zone.tld") used to
// fall through to the two-label fallback and be mis-split; it now
// returns ("@", defaultZone).
func parseHostname(hostname, defaultZone string) (subdomain, zone string) {
	// Known zones managed by this tool; checked after defaultZone.
	knownZones := []string{
		"obr.sh", "obnh.io", "obnh.network", "obnh.org",
		"obr.digital", "obr.im", "s-n-r.net", "as60284.net", "baumert.cc",
	}

	// defaultZone, when provided, takes precedence over the known zones.
	candidates := knownZones
	if defaultZone != "" {
		candidates = append([]string{defaultZone}, knownZones...)
	}

	for _, z := range candidates {
		if hostname == z {
			// The hostname is the zone apex itself.
			return "@", z
		}
		if strings.HasSuffix(hostname, "."+z) {
			return strings.TrimSuffix(hostname, "."+z), z
		}
	}

	// Fallback: assume the last two labels form the zone.
	parts := strings.Split(hostname, ".")
	if len(parts) >= 2 {
		zone = strings.Join(parts[len(parts)-2:], ".")
		subdomain = strings.Join(parts[:len(parts)-2], ".")
		if subdomain == "" {
			subdomain = "@"
		}
	}

	return subdomain, zone
}
|
||||
226
internal/providers/cloudscale.go
Normal file
226
internal/providers/cloudscale.go
Normal file
@@ -0,0 +1,226 @@
|
||||
package providers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/cloudscale-ch/cloudscale-go-sdk/v6"
|
||||
)
|
||||
|
||||
// CloudscaleProvider implements CloudProvider for Cloudscale.ch
// using the official cloudscale-go-sdk.
type CloudscaleProvider struct {
	// client is the authenticated cloudscale API client shared by all calls.
	client *cloudscale.Client
}
|
||||
|
||||
// NewCloudscaleProvider creates a new Cloudscale provider
|
||||
func NewCloudscaleProvider(token string) (*CloudscaleProvider, error) {
|
||||
if token == "" {
|
||||
return nil, fmt.Errorf("cloudscale API token required")
|
||||
}
|
||||
|
||||
client := cloudscale.NewClient(http.DefaultClient)
|
||||
client.AuthToken = token
|
||||
|
||||
return &CloudscaleProvider{client: client}, nil
|
||||
}
|
||||
|
||||
// Name returns the provider identifier used in configuration and
// CLI output ("cloudscale").
func (p *CloudscaleProvider) Name() string {
	return "cloudscale"
}
|
||||
|
||||
func (p *CloudscaleProvider) CreateVM(ctx context.Context, opts VMOptions) (*VM, error) {
|
||||
zone := opts.Zone
|
||||
if zone == "" {
|
||||
zone = "lpg1" // Default: Lupfig
|
||||
}
|
||||
|
||||
image := opts.Image
|
||||
if image == "" {
|
||||
image = "ubuntu-24.04"
|
||||
}
|
||||
|
||||
flavor := opts.Flavor
|
||||
if flavor == "" {
|
||||
flavor = "flex-4-2" // 4 vCPU, 2GB RAM
|
||||
}
|
||||
|
||||
req := &cloudscale.ServerRequest{
|
||||
Name: opts.Name,
|
||||
Flavor: flavor,
|
||||
Image: image,
|
||||
Zone: zone,
|
||||
SSHKeys: []string{opts.SSHPublicKey},
|
||||
VolumeSizeGB: int(opts.DiskSizeGB),
|
||||
}
|
||||
|
||||
if opts.UserData != "" {
|
||||
req.UserData = opts.UserData
|
||||
}
|
||||
|
||||
server, err := p.client.Servers.Create(ctx, req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create server: %w", err)
|
||||
}
|
||||
|
||||
// Wait for server to be running
|
||||
for i := 0; i < 60; i++ {
|
||||
server, err = p.client.Servers.Get(ctx, server.UUID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get server status: %w", err)
|
||||
}
|
||||
if server.Status == "running" {
|
||||
break
|
||||
}
|
||||
time.Sleep(5 * time.Second)
|
||||
}
|
||||
|
||||
// Get public IP
|
||||
var publicIP string
|
||||
for _, iface := range server.Interfaces {
|
||||
if iface.Type == "public" {
|
||||
for _, addr := range iface.Addresses {
|
||||
if addr.Version == 4 {
|
||||
publicIP = addr.Address
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return &VM{
|
||||
ID: server.UUID,
|
||||
Name: server.Name,
|
||||
PublicIP: publicIP,
|
||||
Status: server.Status,
|
||||
Provider: "cloudscale",
|
||||
Zone: server.Zone.Slug,
|
||||
CreatedAt: server.CreatedAt,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// DeleteVM deletes the server identified by its cloudscale UUID.
// Any error from the API is returned unchanged.
func (p *CloudscaleProvider) DeleteVM(ctx context.Context, vmID string) error {
	return p.client.Servers.Delete(ctx, vmID)
}
|
||||
|
||||
func (p *CloudscaleProvider) GetVM(ctx context.Context, vmID string) (*VM, error) {
|
||||
server, err := p.client.Servers.Get(ctx, vmID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var publicIP string
|
||||
for _, iface := range server.Interfaces {
|
||||
if iface.Type == "public" {
|
||||
for _, addr := range iface.Addresses {
|
||||
if addr.Version == 4 {
|
||||
publicIP = addr.Address
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return &VM{
|
||||
ID: server.UUID,
|
||||
Name: server.Name,
|
||||
PublicIP: publicIP,
|
||||
Status: server.Status,
|
||||
Provider: "cloudscale",
|
||||
Zone: server.Zone.Slug,
|
||||
CreatedAt: server.CreatedAt,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *CloudscaleProvider) WaitForSSH(ctx context.Context, ip string, port int, timeout time.Duration) error {
|
||||
if port == 0 {
|
||||
port = 22
|
||||
}
|
||||
|
||||
deadline := time.Now().Add(timeout)
|
||||
address := fmt.Sprintf("%s:%d", ip, port)
|
||||
|
||||
for time.Now().Before(deadline) {
|
||||
conn, err := net.DialTimeout("tcp", address, 5*time.Second)
|
||||
if err == nil {
|
||||
conn.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-time.After(5 * time.Second):
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("SSH not available at %s after %v", address, timeout)
|
||||
}
|
||||
|
||||
func (p *CloudscaleProvider) ListFlavors(ctx context.Context) ([]Flavor, error) {
|
||||
// Make raw API request to /v1/flavors
|
||||
req, err := p.client.NewRequest(ctx, "GET", "v1/flavors", nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create request: %w", err)
|
||||
}
|
||||
|
||||
var flavors []cloudscale.Flavor
|
||||
if err := p.client.Do(ctx, req, &flavors); err != nil {
|
||||
return nil, fmt.Errorf("failed to list flavors: %w", err)
|
||||
}
|
||||
|
||||
var result []Flavor
|
||||
for _, f := range flavors {
|
||||
result = append(result, Flavor{
|
||||
ID: f.Slug,
|
||||
Name: f.Name,
|
||||
CPUs: f.VCPUCount,
|
||||
Memory: f.MemoryGB * 1024, // Convert to MB
|
||||
})
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (p *CloudscaleProvider) ListImages(ctx context.Context, filter string) ([]Image, error) {
|
||||
// Make raw API request to /v1/images
|
||||
req, err := p.client.NewRequest(ctx, "GET", "v1/images", nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create request: %w", err)
|
||||
}
|
||||
|
||||
var images []cloudscale.Image
|
||||
if err := p.client.Do(ctx, req, &images); err != nil {
|
||||
return nil, fmt.Errorf("failed to list images: %w", err)
|
||||
}
|
||||
|
||||
var result []Image
|
||||
for _, img := range images {
|
||||
if filter == "" || contains(img.Slug, filter) || contains(img.Name, filter) {
|
||||
result = append(result, Image{
|
||||
ID: img.Slug,
|
||||
Name: img.Name,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (p *CloudscaleProvider) ListZones(ctx context.Context) ([]string, error) {
|
||||
regions, err := p.client.Regions.List(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var zones []string
|
||||
for _, r := range regions {
|
||||
for _, z := range r.Zones {
|
||||
zones = append(zones, z.Slug)
|
||||
}
|
||||
}
|
||||
|
||||
return zones, nil
|
||||
}
|
||||
319
internal/providers/exoscale.go
Normal file
319
internal/providers/exoscale.go
Normal file
@@ -0,0 +1,319 @@
|
||||
package providers
|
||||
|
||||
import (
	"context"
	"fmt"
	"net"
	"strings"
	"time"

	v3 "github.com/exoscale/egoscale/v3"
	"github.com/exoscale/egoscale/v3/credentials"
)
|
||||
|
||||
// ExoscaleProvider implements CloudProvider for Exoscale.
// Exoscale's v3 API is zone-scoped, so instead of a single client the
// provider stores credentials and builds a per-zone client on demand
// (see getClientForZone).
type ExoscaleProvider struct {
	// creds holds the static API key/secret used for every zone client.
	creds *credentials.Credentials
}
|
||||
|
||||
// NewExoscaleProvider creates a new Exoscale provider
|
||||
func NewExoscaleProvider(apiKey, apiSecret string) (*ExoscaleProvider, error) {
|
||||
creds := credentials.NewStaticCredentials(apiKey, apiSecret)
|
||||
return &ExoscaleProvider{creds: creds}, nil
|
||||
}
|
||||
|
||||
// Name returns the provider identifier used in configuration and
// CLI output ("exoscale").
func (p *ExoscaleProvider) Name() string {
	return "exoscale"
}
|
||||
|
||||
// getClientForZone creates a zone-specific client.
// Exoscale's v3 API uses one endpoint per zone, so every operation first
// resolves the zone name to its endpoint and builds a client for it.
func (p *ExoscaleProvider) getClientForZone(zone string) (*v3.Client, error) {
	endpoint := p.getEndpointForZone(zone)
	return v3.NewClient(p.creds, v3.ClientOptWithEndpoint(endpoint))
}
|
||||
|
||||
// getEndpointForZone maps zone names to API endpoints
|
||||
func (p *ExoscaleProvider) getEndpointForZone(zone string) v3.Endpoint {
|
||||
endpoints := map[string]v3.Endpoint{
|
||||
"ch-gva-2": v3.CHGva2,
|
||||
"ch-dk-2": v3.CHDk2,
|
||||
"de-fra-1": v3.DEFra1,
|
||||
"de-muc-1": v3.DEMuc1,
|
||||
"at-vie-1": v3.ATVie1,
|
||||
"at-vie-2": v3.ATVie2,
|
||||
"bg-sof-1": v3.BGSof1,
|
||||
}
|
||||
|
||||
if endpoint, ok := endpoints[zone]; ok {
|
||||
return endpoint
|
||||
}
|
||||
return v3.CHGva2 // Default
|
||||
}
|
||||
|
||||
func (p *ExoscaleProvider) CreateVM(ctx context.Context, opts VMOptions) (*VM, error) {
|
||||
// Get zone endpoint
|
||||
zone := opts.Zone
|
||||
if zone == "" {
|
||||
zone = "ch-gva-2" // Default zone
|
||||
}
|
||||
|
||||
// Create client for specific zone
|
||||
client, err := p.getClientForZone(zone)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create zone client: %w", err)
|
||||
}
|
||||
|
||||
// Find template (image)
|
||||
templates, err := client.ListTemplates(ctx, v3.ListTemplatesWithVisibility("public"))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list templates: %w", err)
|
||||
}
|
||||
|
||||
selectedTemplate, err := templates.FindTemplate(opts.Image)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("template not found: %s (%w)", opts.Image, err)
|
||||
}
|
||||
|
||||
// Find instance type
|
||||
instanceTypes, err := client.ListInstanceTypes(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list instance types: %w", err)
|
||||
}
|
||||
|
||||
selectedType, err := instanceTypes.FindInstanceTypeByIdOrFamilyAndSize(opts.Flavor)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("instance type not found: %s (%w)", opts.Flavor, err)
|
||||
}
|
||||
|
||||
// Determine disk size
|
||||
diskSize := opts.DiskSizeGB
|
||||
if diskSize == 0 {
|
||||
diskSize = 50 // Default 50GB
|
||||
}
|
||||
|
||||
// Register the SSH key temporarily
|
||||
sshKeyName := fmt.Sprintf("recovery-%s-%d", opts.Name, time.Now().Unix())
|
||||
sshKeyOp, err := client.RegisterSSHKey(ctx, v3.RegisterSSHKeyRequest{
|
||||
Name: sshKeyName,
|
||||
PublicKey: opts.SSHPublicKey,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to register SSH key: %w", err)
|
||||
}
|
||||
|
||||
// Wait for SSH key registration
|
||||
sshKeyOp, err = client.Wait(ctx, sshKeyOp, v3.OperationStateSuccess)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("SSH key registration failed: %w", err)
|
||||
}
|
||||
|
||||
// Create the instance
|
||||
createReq := v3.CreateInstanceRequest{
|
||||
Name: opts.Name,
|
||||
InstanceType: &selectedType,
|
||||
Template: &selectedTemplate,
|
||||
DiskSize: diskSize,
|
||||
SSHKey: &v3.SSHKey{Name: sshKeyName},
|
||||
}
|
||||
|
||||
// Add user data if provided
|
||||
if opts.UserData != "" {
|
||||
createReq.UserData = opts.UserData
|
||||
}
|
||||
|
||||
op, err := client.CreateInstance(ctx, createReq)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create instance: %w", err)
|
||||
}
|
||||
|
||||
// Wait for operation to complete
|
||||
op, err = client.Wait(ctx, op, v3.OperationStateSuccess)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("instance creation failed: %w", err)
|
||||
}
|
||||
|
||||
// Get the created instance
|
||||
if op.Reference == nil {
|
||||
return nil, fmt.Errorf("operation completed but no reference returned")
|
||||
}
|
||||
|
||||
instance, err := client.GetInstance(ctx, op.Reference.ID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get created instance: %w", err)
|
||||
}
|
||||
|
||||
// Extract public IP
|
||||
var publicIP string
|
||||
if instance.PublicIP != nil {
|
||||
publicIP = instance.PublicIP.String()
|
||||
}
|
||||
|
||||
return &VM{
|
||||
ID: string(instance.ID),
|
||||
Name: instance.Name,
|
||||
PublicIP: publicIP,
|
||||
Status: string(instance.State),
|
||||
Provider: "exoscale",
|
||||
Zone: zone,
|
||||
CreatedAt: instance.CreatedAT,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *ExoscaleProvider) DeleteVM(ctx context.Context, vmID string) error {
|
||||
// We need to find which zone the VM is in
|
||||
zones := []string{"ch-gva-2", "ch-dk-2", "de-fra-1", "de-muc-1", "at-vie-1", "at-vie-2", "bg-sof-1"}
|
||||
|
||||
for _, zone := range zones {
|
||||
client, err := p.getClientForZone(zone)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
op, err := client.DeleteInstance(ctx, v3.UUID(vmID))
|
||||
if err != nil {
|
||||
continue // Try next zone
|
||||
}
|
||||
|
||||
_, err = client.Wait(ctx, op, v3.OperationStateSuccess)
|
||||
if err != nil {
|
||||
return fmt.Errorf("delete failed: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("VM %s not found in any zone", vmID)
|
||||
}
|
||||
|
||||
func (p *ExoscaleProvider) GetVM(ctx context.Context, vmID string) (*VM, error) {
|
||||
zones := []string{"ch-gva-2", "ch-dk-2", "de-fra-1", "de-muc-1", "at-vie-1", "at-vie-2", "bg-sof-1"}
|
||||
|
||||
for _, zone := range zones {
|
||||
client, err := p.getClientForZone(zone)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
instance, err := client.GetInstance(ctx, v3.UUID(vmID))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
var publicIP string
|
||||
if instance.PublicIP != nil {
|
||||
publicIP = instance.PublicIP.String()
|
||||
}
|
||||
|
||||
return &VM{
|
||||
ID: string(instance.ID),
|
||||
Name: instance.Name,
|
||||
PublicIP: publicIP,
|
||||
Status: string(instance.State),
|
||||
Provider: "exoscale",
|
||||
Zone: zone,
|
||||
CreatedAt: instance.CreatedAT,
|
||||
}, nil
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("VM %s not found", vmID)
|
||||
}
|
||||
|
||||
func (p *ExoscaleProvider) WaitForSSH(ctx context.Context, ip string, port int, timeout time.Duration) error {
|
||||
if port == 0 {
|
||||
port = 22
|
||||
}
|
||||
|
||||
deadline := time.Now().Add(timeout)
|
||||
address := fmt.Sprintf("%s:%d", ip, port)
|
||||
|
||||
for time.Now().Before(deadline) {
|
||||
conn, err := net.DialTimeout("tcp", address, 5*time.Second)
|
||||
if err == nil {
|
||||
conn.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-time.After(5 * time.Second):
|
||||
// Continue trying
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("SSH not available at %s after %v", address, timeout)
|
||||
}
|
||||
|
||||
func (p *ExoscaleProvider) ListFlavors(ctx context.Context) ([]Flavor, error) {
|
||||
// Use default zone for listing
|
||||
client, err := p.getClientForZone("ch-gva-2")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
types, err := client.ListInstanceTypes(ctx)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list instance types: %w", err)
|
||||
}
|
||||
|
||||
var flavors []Flavor
|
||||
for _, it := range types.InstanceTypes {
|
||||
flavors = append(flavors, Flavor{
|
||||
ID: string(it.ID),
|
||||
Name: string(it.Size),
|
||||
CPUs: int(it.Cpus),
|
||||
Memory: int(it.Memory),
|
||||
})
|
||||
}
|
||||
|
||||
return flavors, nil
|
||||
}
|
||||
|
||||
func (p *ExoscaleProvider) ListImages(ctx context.Context, filter string) ([]Image, error) {
|
||||
client, err := p.getClientForZone("ch-gva-2")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
templates, err := client.ListTemplates(ctx, v3.ListTemplatesWithVisibility("public"))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list templates: %w", err)
|
||||
}
|
||||
|
||||
var images []Image
|
||||
for _, tmpl := range templates.Templates {
|
||||
if filter == "" || contains(tmpl.Name, filter) {
|
||||
images = append(images, Image{
|
||||
ID: string(tmpl.ID),
|
||||
Name: tmpl.Name,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return images, nil
|
||||
}
|
||||
|
||||
func (p *ExoscaleProvider) ListZones(ctx context.Context) ([]string, error) {
|
||||
return []string{
|
||||
"ch-gva-2", // Geneva
|
||||
"ch-dk-2", // Zurich
|
||||
"de-fra-1", // Frankfurt
|
||||
"de-muc-1", // Munich
|
||||
"at-vie-1", // Vienna 1
|
||||
"at-vie-2", // Vienna 2
|
||||
"bg-sof-1", // Sofia
|
||||
}, nil
|
||||
}
|
||||
|
||||
// contains reports whether substr occurs within s. It delegates to
// strings.Contains, which has the same semantics as the previous
// hand-rolled implementation (an empty substr matches any string).
func contains(s, substr string) bool {
	return strings.Contains(s, substr)
}
|
||||
|
||||
// findSubstring reports whether substr occurs in s using a naive
// sliding-window scan. An empty substr always matches.
func findSubstring(s, substr string) bool {
	limit := len(s) - len(substr)
	for start := 0; start <= limit; start++ {
		if s[start:start+len(substr)] == substr {
			return true
		}
	}
	return false
}
|
||||
40
internal/providers/factory.go
Normal file
40
internal/providers/factory.go
Normal file
@@ -0,0 +1,40 @@
|
||||
package providers
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// NewProvider creates a cloud provider by name
|
||||
func NewProvider(name string, config map[string]string) (CloudProvider, error) {
|
||||
switch name {
|
||||
case "exoscale":
|
||||
apiKey := config["api_key"]
|
||||
apiSecret := config["api_secret"]
|
||||
if apiKey == "" || apiSecret == "" {
|
||||
return nil, fmt.Errorf("exoscale requires api_key and api_secret")
|
||||
}
|
||||
return NewExoscaleProvider(apiKey, apiSecret)
|
||||
|
||||
case "cloudscale":
|
||||
token := config["token"]
|
||||
if token == "" {
|
||||
return nil, fmt.Errorf("cloudscale requires token")
|
||||
}
|
||||
return NewCloudscaleProvider(token)
|
||||
|
||||
case "hetzner":
|
||||
token := config["token"]
|
||||
if token == "" {
|
||||
return nil, fmt.Errorf("hetzner requires token")
|
||||
}
|
||||
return NewHetznerProvider(token)
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown provider: %s", name)
|
||||
}
|
||||
}
|
||||
|
||||
// SupportedProviders returns the names accepted by NewProvider.
func SupportedProviders() []string {
	names := []string{"exoscale", "cloudscale", "hetzner"}
	return names
}
|
||||
233
internal/providers/hetzner.go
Normal file
233
internal/providers/hetzner.go
Normal file
@@ -0,0 +1,233 @@
|
||||
package providers
|
||||
|
||||
import (
	"context"
	"fmt"
	"net"
	"strconv"
	"time"

	"github.com/hetznercloud/hcloud-go/v2/hcloud"
)
|
||||
|
||||
// HetznerProvider implements CloudProvider for Hetzner Cloud
// via the official hcloud-go SDK.
type HetznerProvider struct {
	// client is the authenticated Hetzner Cloud API client.
	client *hcloud.Client
}
|
||||
|
||||
// NewHetznerProvider creates a new Hetzner provider
|
||||
func NewHetznerProvider(token string) (*HetznerProvider, error) {
|
||||
if token == "" {
|
||||
return nil, fmt.Errorf("hetzner API token required")
|
||||
}
|
||||
|
||||
client := hcloud.NewClient(hcloud.WithToken(token))
|
||||
|
||||
return &HetznerProvider{client: client}, nil
|
||||
}
|
||||
|
||||
// Name returns the provider identifier used in configuration and
// CLI output ("hetzner").
func (p *HetznerProvider) Name() string {
	return "hetzner"
}
|
||||
|
||||
func (p *HetznerProvider) CreateVM(ctx context.Context, opts VMOptions) (*VM, error) {
|
||||
// Parse zone/location
|
||||
location := opts.Zone
|
||||
if location == "" {
|
||||
location = "fsn1" // Default: Falkenstein
|
||||
}
|
||||
|
||||
image := opts.Image
|
||||
if image == "" {
|
||||
image = "ubuntu-24.04"
|
||||
}
|
||||
|
||||
serverType := opts.Flavor
|
||||
if serverType == "" {
|
||||
serverType = "cx22" // 2 vCPU, 4GB RAM
|
||||
}
|
||||
|
||||
// Register SSH key if provided
|
||||
var sshKeys []*hcloud.SSHKey
|
||||
if opts.SSHPublicKey != "" {
|
||||
keyName := fmt.Sprintf("recovery-%s-%d", opts.Name, time.Now().Unix())
|
||||
sshKey, _, err := p.client.SSHKey.Create(ctx, hcloud.SSHKeyCreateOpts{
|
||||
Name: keyName,
|
||||
PublicKey: opts.SSHPublicKey,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to register SSH key: %w", err)
|
||||
}
|
||||
sshKeys = append(sshKeys, sshKey)
|
||||
}
|
||||
|
||||
// Create server
|
||||
createOpts := hcloud.ServerCreateOpts{
|
||||
Name: opts.Name,
|
||||
ServerType: &hcloud.ServerType{Name: serverType},
|
||||
Image: &hcloud.Image{Name: image},
|
||||
Location: &hcloud.Location{Name: location},
|
||||
SSHKeys: sshKeys,
|
||||
}
|
||||
|
||||
if opts.UserData != "" {
|
||||
createOpts.UserData = opts.UserData
|
||||
}
|
||||
|
||||
result, _, err := p.client.Server.Create(ctx, createOpts)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create server: %w", err)
|
||||
}
|
||||
|
||||
// Wait for server to be running
|
||||
server := result.Server
|
||||
for i := 0; i < 60; i++ {
|
||||
server, _, err = p.client.Server.GetByID(ctx, server.ID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get server status: %w", err)
|
||||
}
|
||||
if server.Status == hcloud.ServerStatusRunning {
|
||||
break
|
||||
}
|
||||
time.Sleep(5 * time.Second)
|
||||
}
|
||||
|
||||
var publicIP string
|
||||
if server.PublicNet.IPv4.IP != nil {
|
||||
publicIP = server.PublicNet.IPv4.IP.String()
|
||||
}
|
||||
|
||||
return &VM{
|
||||
ID: fmt.Sprintf("%d", server.ID),
|
||||
Name: server.Name,
|
||||
PublicIP: publicIP,
|
||||
Status: string(server.Status),
|
||||
Provider: "hetzner",
|
||||
Zone: server.Datacenter.Location.Name,
|
||||
CreatedAt: server.Created,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *HetznerProvider) DeleteVM(ctx context.Context, vmID string) error {
|
||||
var id int64
|
||||
fmt.Sscanf(vmID, "%d", &id)
|
||||
|
||||
server, _, err := p.client.Server.GetByID(ctx, id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if server == nil {
|
||||
return fmt.Errorf("server not found: %s", vmID)
|
||||
}
|
||||
|
||||
_, _, err = p.client.Server.DeleteWithResult(ctx, server)
|
||||
return err
|
||||
}
|
||||
|
||||
func (p *HetznerProvider) GetVM(ctx context.Context, vmID string) (*VM, error) {
|
||||
var id int64
|
||||
fmt.Sscanf(vmID, "%d", &id)
|
||||
|
||||
server, _, err := p.client.Server.GetByID(ctx, id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if server == nil {
|
||||
return nil, fmt.Errorf("server not found: %s", vmID)
|
||||
}
|
||||
|
||||
var publicIP string
|
||||
if server.PublicNet.IPv4.IP != nil {
|
||||
publicIP = server.PublicNet.IPv4.IP.String()
|
||||
}
|
||||
|
||||
return &VM{
|
||||
ID: fmt.Sprintf("%d", server.ID),
|
||||
Name: server.Name,
|
||||
PublicIP: publicIP,
|
||||
Status: string(server.Status),
|
||||
Provider: "hetzner",
|
||||
Zone: server.Datacenter.Location.Name,
|
||||
CreatedAt: server.Created,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *HetznerProvider) WaitForSSH(ctx context.Context, ip string, port int, timeout time.Duration) error {
|
||||
if port == 0 {
|
||||
port = 22
|
||||
}
|
||||
|
||||
deadline := time.Now().Add(timeout)
|
||||
address := fmt.Sprintf("%s:%d", ip, port)
|
||||
|
||||
for time.Now().Before(deadline) {
|
||||
conn, err := net.DialTimeout("tcp", address, 5*time.Second)
|
||||
if err == nil {
|
||||
conn.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
case <-time.After(5 * time.Second):
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("SSH not available at %s after %v", address, timeout)
|
||||
}
|
||||
|
||||
func (p *HetznerProvider) ListFlavors(ctx context.Context) ([]Flavor, error) {
|
||||
types, err := p.client.ServerType.All(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var result []Flavor
|
||||
for _, t := range types {
|
||||
result = append(result, Flavor{
|
||||
ID: t.Name,
|
||||
Name: t.Description,
|
||||
CPUs: t.Cores,
|
||||
Memory: int(t.Memory * 1024), // Convert GB to MB
|
||||
Disk: t.Disk,
|
||||
})
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (p *HetznerProvider) ListImages(ctx context.Context, filter string) ([]Image, error) {
|
||||
images, err := p.client.Image.All(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var result []Image
|
||||
for _, img := range images {
|
||||
if img.Type != hcloud.ImageTypeSystem {
|
||||
continue // Only show system images
|
||||
}
|
||||
if filter == "" || contains(img.Name, filter) || contains(img.Description, filter) {
|
||||
result = append(result, Image{
|
||||
ID: img.Name,
|
||||
Name: img.Description,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (p *HetznerProvider) ListZones(ctx context.Context) ([]string, error) {
|
||||
locations, err := p.client.Location.All(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var zones []string
|
||||
for _, l := range locations {
|
||||
zones = append(zones, l.Name)
|
||||
}
|
||||
|
||||
return zones, nil
|
||||
}
|
||||
96
internal/providers/provider.go
Normal file
96
internal/providers/provider.go
Normal file
@@ -0,0 +1,96 @@
|
||||
package providers
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
)
|
||||
|
||||
// VMOptions contains options for creating a new VM.
// Zero values select provider-specific defaults for Zone, Flavor,
// Image and DiskSizeGB.
type VMOptions struct {
	Name         string            // Server name (also used to label the ephemeral SSH key)
	Zone         string            // e.g., "ch-gva-2", "lpg1", "fsn1"
	Flavor       string            // Instance type/size
	Image        string            // OS image name or ID
	SSHPublicKey string            // Ephemeral public key content
	UserData     string            // Cloud-init script
	DiskSizeGB   int64             // Root disk size
	Tags         map[string]string // Optional tags/labels
}
|
||||
|
||||
// VM represents a created virtual machine in provider-agnostic form.
type VM struct {
	ID        string    // Provider-specific identifier (UUID, or numeric ID as string)
	Name      string    // Server name
	PublicIP  string    // Public IPv4 address; empty if none was found
	PrivateIP string    // Private address — NOTE(review): not populated by the current providers
	Status    string    // Provider-reported status string
	Provider  string    // Provider name, e.g. "exoscale"
	Zone      string    // Zone/location the VM lives in
	CreatedAt time.Time // Creation timestamp reported by the provider
}
|
||||
|
||||
// Flavor represents an instance type/size.
type Flavor struct {
	ID     string // Provider-specific identifier (slug or name)
	Name   string // Human-readable name or description
	CPUs   int    // Number of virtual CPUs
	Memory int    // MB
	Disk   int    // GB (if applicable)
}
|
||||
|
||||
// Image represents an OS image offered by a provider.
type Image struct {
	ID   string // Provider-specific identifier (slug, UUID, or name)
	Name string // Human-readable name
}
|
||||
|
||||
// CloudProvider defines the interface for cloud providers.
// Implementations exist for Exoscale, Cloudscale and Hetzner Cloud;
// use NewProvider to construct one by name.
type CloudProvider interface {
	// Name returns the provider name.
	Name() string

	// CreateVM creates a new virtual machine and blocks until the
	// provider reports it running (or the attempt fails).
	CreateVM(ctx context.Context, opts VMOptions) (*VM, error)

	// DeleteVM deletes a virtual machine by ID.
	DeleteVM(ctx context.Context, vmID string) error

	// GetVM gets VM details by ID.
	GetVM(ctx context.Context, vmID string) (*VM, error)

	// WaitForSSH waits until a TCP connection to the VM's SSH port
	// succeeds (port 0 means 22), the context is cancelled, or the
	// timeout elapses.
	WaitForSSH(ctx context.Context, ip string, port int, timeout time.Duration) error

	// ListFlavors lists available instance types.
	ListFlavors(ctx context.Context) ([]Flavor, error)

	// ListImages lists available OS images, optionally filtered by a
	// name substring.
	ListImages(ctx context.Context, filter string) ([]Image, error)

	// ListZones lists available zones/regions.
	ListZones(ctx context.Context) ([]string, error)
}
|
||||
|
||||
// GenerateCloudInit creates a cloud-init user-data script that injects
// the ephemeral public key for root, installs the tooling the restore
// pipeline relies on (rsync, docker, wireguard), drops a readiness
// marker at /var/tmp/recovery-ready, and enables/starts Docker.
func GenerateCloudInit(ephemeralPubKey string) string {
	const header = `#cloud-config
ssh_pwauth: false
users:
  - name: root
    ssh_authorized_keys:
      - "`
	const footer = `"
package_update: true
packages:
  - rsync
  - docker.io
  - docker-compose
  - wireguard-tools
write_files:
  - path: /var/tmp/recovery-ready
    content: "ready"
    permissions: '0644'
runcmd:
  - systemctl enable docker
  - systemctl start docker
`
	return header + ephemeralPubKey + footer
}
|
||||
102
internal/restore/docker.go
Normal file
102
internal/restore/docker.go
Normal file
@@ -0,0 +1,102 @@
|
||||
package restore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// startDocker ensures Docker is running and starts compose stacks
|
||||
func (p *Pipeline) startDocker(ctx context.Context) error {
|
||||
// Ensure Docker is enabled and running
|
||||
if err := p.remoteCmd(ctx, "systemctl enable docker"); err != nil {
|
||||
return fmt.Errorf("failed to enable docker: %w", err)
|
||||
}
|
||||
|
||||
if err := p.remoteCmd(ctx, "systemctl start docker"); err != nil {
|
||||
return fmt.Errorf("failed to start docker: %w", err)
|
||||
}
|
||||
|
||||
// Wait for Docker to be ready
|
||||
for i := 0; i < 30; i++ {
|
||||
if err := p.remoteCmd(ctx, "docker info > /dev/null 2>&1"); err == nil {
|
||||
break
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
|
||||
// Find and start docker-compose stacks
|
||||
findCmd := "find /opt -name 'docker-compose.yml' -o -name 'docker-compose.yaml' -o -name 'compose.yml' -o -name 'compose.yaml' 2>/dev/null | head -20"
|
||||
output, err := p.remoteCmdOutput(ctx, findCmd)
|
||||
if err != nil || strings.TrimSpace(output) == "" {
|
||||
if p.Verbose {
|
||||
fmt.Println(" No docker-compose files found")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
composeFiles := strings.Split(strings.TrimSpace(output), "\n")
|
||||
for _, file := range composeFiles {
|
||||
if file == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Get directory containing compose file
|
||||
dir := file[:strings.LastIndex(file, "/")]
|
||||
|
||||
if p.Verbose {
|
||||
fmt.Printf(" Starting compose stack in %s\n", dir)
|
||||
}
|
||||
|
||||
// Try docker compose (v2) first, fall back to docker-compose (v1)
|
||||
startCmd := fmt.Sprintf("cd %s && (docker compose up -d 2>/dev/null || docker-compose up -d)", dir)
|
||||
if err := p.remoteCmd(ctx, startCmd); err != nil {
|
||||
if p.Verbose {
|
||||
fmt.Printf(" Warning: failed to start stack in %s: %v\n", dir, err)
|
||||
}
|
||||
// Don't fail on individual stack failures
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// runHealth performs health verification
|
||||
func (p *Pipeline) runHealth(ctx context.Context) error {
|
||||
checks := []struct {
|
||||
name string
|
||||
cmd string
|
||||
require bool
|
||||
}{
|
||||
{"SSH accessible", "echo ok", true},
|
||||
{"Docker running", "docker info > /dev/null 2>&1 && echo ok", true},
|
||||
{"Network connectivity", "ping -c 1 8.8.8.8 > /dev/null 2>&1 && echo ok", false},
|
||||
{"DNS resolution", "host google.com > /dev/null 2>&1 && echo ok", false},
|
||||
}
|
||||
|
||||
var failures []string
|
||||
|
||||
for _, check := range checks {
|
||||
output, err := p.remoteCmdOutput(ctx, check.cmd)
|
||||
success := err == nil && strings.TrimSpace(output) == "ok"
|
||||
|
||||
status := "✓"
|
||||
if !success {
|
||||
status = "✗"
|
||||
if check.require {
|
||||
failures = append(failures, check.name)
|
||||
}
|
||||
}
|
||||
|
||||
if p.Verbose {
|
||||
fmt.Printf(" %s %s\n", status, check.name)
|
||||
}
|
||||
}
|
||||
|
||||
if len(failures) > 0 {
|
||||
return fmt.Errorf("required health checks failed: %v", failures)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
130
internal/restore/pipeline.go
Normal file
130
internal/restore/pipeline.go
Normal file
@@ -0,0 +1,130 @@
|
||||
package restore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"recover-server/internal/backup"
|
||||
"recover-server/internal/providers"
|
||||
)
|
||||
|
||||
// Stage identifies one step of the restore pipeline. Values start at 1
// so stage numbers match the 1-based progress output.
type Stage int

const (
	StageSync         Stage = iota + 1 // sync /root and /opt from backup
	StageEtc                           // stage the /etc backup
	StageSelectiveEtc                  // selective /etc restore
	StageSSHKeys                       // merge SSH keys
	StageServices                      // start services
	StageHealth                        // health verification
)

// String returns the human-readable stage name used in progress output.
// A switch avoids the per-call map allocation of the original, and an
// out-of-range Stage now renders as "Stage(N)" instead of "".
func (s Stage) String() string {
	switch s {
	case StageSync:
		return "Sync /root and /opt"
	case StageEtc:
		return "Stage /etc backup"
	case StageSelectiveEtc:
		return "Selective /etc restore"
	case StageSSHKeys:
		return "Merge SSH keys"
	case StageServices:
		return "Start services"
	case StageHealth:
		return "Health verification"
	default:
		return fmt.Sprintf("Stage(%d)", int(s))
	}
}
|
||||
|
||||
// StageResult contains the result of a stage execution
type StageResult struct {
	Stage    Stage         // which pipeline stage this result belongs to
	Success  bool          // true when the stage completed without error
	Message  string        // outcome text: error string, dry-run note, or "Completed successfully"
	Duration time.Duration // wall-clock time spent in the stage (0 in dry-run mode)
	Error    error         // underlying error, nil on success
}
|
||||
|
||||
// Pipeline orchestrates the restore process
type Pipeline struct {
	VM           *providers.VM       // target VM the backup is restored onto
	BackupSource backup.BackupSource // where backup data is pulled from
	HostName     string              // name of the host being recovered
	SSHKeyPath   string              // Path to ephemeral private key
	SSHUser      string              // Usually "root"
	DryRun       bool                // when true, stages are recorded but not executed
	Verbose      bool                // when true, per-stage progress is printed

	// results accumulates one StageResult per executed (or dry-run) stage.
	results []StageResult
}
|
||||
|
||||
// NewPipeline creates a new restore pipeline
|
||||
func NewPipeline(vm *providers.VM, source backup.BackupSource, host, sshKeyPath string) *Pipeline {
|
||||
return &Pipeline{
|
||||
VM: vm,
|
||||
BackupSource: source,
|
||||
HostName: host,
|
||||
SSHKeyPath: sshKeyPath,
|
||||
SSHUser: "root",
|
||||
results: make([]StageResult, 0),
|
||||
}
|
||||
}
|
||||
|
||||
// Run executes all stages
|
||||
func (p *Pipeline) Run(ctx context.Context) error {
|
||||
stages := []struct {
|
||||
stage Stage
|
||||
fn func(context.Context) error
|
||||
}{
|
||||
{StageSync, p.runSync},
|
||||
{StageEtc, p.runEtcStaging},
|
||||
{StageSelectiveEtc, p.runSelectiveEtc},
|
||||
{StageSSHKeys, p.runSSHKeyMerge},
|
||||
{StageServices, p.runServices},
|
||||
{StageHealth, p.runHealth},
|
||||
}
|
||||
|
||||
for _, s := range stages {
|
||||
start := time.Now()
|
||||
|
||||
if p.Verbose {
|
||||
fmt.Printf("\n=== Stage %d: %s ===\n", s.stage, s.stage)
|
||||
}
|
||||
|
||||
if p.DryRun {
|
||||
p.results = append(p.results, StageResult{
|
||||
Stage: s.stage,
|
||||
Success: true,
|
||||
Message: "[DRY RUN] Would execute: " + s.stage.String(),
|
||||
Duration: 0,
|
||||
})
|
||||
continue
|
||||
}
|
||||
|
||||
err := s.fn(ctx)
|
||||
result := StageResult{
|
||||
Stage: s.stage,
|
||||
Success: err == nil,
|
||||
Duration: time.Since(start),
|
||||
Error: err,
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
result.Message = err.Error()
|
||||
p.results = append(p.results, result)
|
||||
return fmt.Errorf("stage %d (%s) failed: %w", s.stage, s.stage, err)
|
||||
}
|
||||
|
||||
result.Message = "Completed successfully"
|
||||
p.results = append(p.results, result)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Results returns all stage results recorded so far.
// NOTE(review): this hands out the pipeline's internal slice, not a
// copy — callers must treat the returned value as read-only.
func (p *Pipeline) Results() []StageResult {
	return p.results
}
|
||||
|
||||
// sshTarget returns the SSH target string
|
||||
func (p *Pipeline) sshTarget() string {
|
||||
return fmt.Sprintf("%s@%s", p.SSHUser, p.VM.PublicIP)
|
||||
}
|
||||
110
internal/restore/ssh.go
Normal file
110
internal/restore/ssh.go
Normal file
@@ -0,0 +1,110 @@
|
||||
package restore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/ed25519"
|
||||
"crypto/rand"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/crypto/ssh"
|
||||
)
|
||||
|
||||
// SSHKeyPair holds an ephemeral SSH key pair
type SSHKeyPair struct {
	PrivateKeyPath string // filesystem path to the private key (inside a temp dir)
	PublicKey      string // public key in authorized_keys format, no trailing newline
}
|
||||
|
||||
// GenerateEphemeralKey creates a temporary ED25519 SSH key pair
|
||||
func GenerateEphemeralKey() (*SSHKeyPair, error) {
|
||||
// Generate ED25519 key pair
|
||||
pubKey, privKey, err := ed25519.GenerateKey(rand.Reader)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to generate key: %w", err)
|
||||
}
|
||||
|
||||
// Convert to SSH format
|
||||
sshPubKey, err := ssh.NewPublicKey(pubKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create SSH public key: %w", err)
|
||||
}
|
||||
|
||||
// Marshal public key
|
||||
pubKeyStr := strings.TrimSpace(string(ssh.MarshalAuthorizedKey(sshPubKey)))
|
||||
|
||||
// Create temp directory for key
|
||||
tmpDir, err := os.MkdirTemp("", "recover-ssh-")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create temp dir: %w", err)
|
||||
}
|
||||
|
||||
// Write private key in OpenSSH format
|
||||
privKeyPath := filepath.Join(tmpDir, "id_ed25519")
|
||||
|
||||
// Marshal private key to OpenSSH format
|
||||
pemBlock, err := ssh.MarshalPrivateKey(privKey, "")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal private key: %w", err)
|
||||
}
|
||||
|
||||
privKeyPEM := pem.EncodeToMemory(pemBlock)
|
||||
if err := os.WriteFile(privKeyPath, privKeyPEM, 0600); err != nil {
|
||||
return nil, fmt.Errorf("failed to write private key: %w", err)
|
||||
}
|
||||
|
||||
return &SSHKeyPair{
|
||||
PrivateKeyPath: privKeyPath,
|
||||
PublicKey: pubKeyStr,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Cleanup removes the ephemeral key files
// by deleting the temp directory that holds the private key. Safe to
// call on a zero-value pair (no-op when PrivateKeyPath is empty);
// removal errors are deliberately ignored (best-effort cleanup).
func (k *SSHKeyPair) Cleanup() {
	if k.PrivateKeyPath != "" {
		os.RemoveAll(filepath.Dir(k.PrivateKeyPath))
	}
}
|
||||
|
||||
// runSSHKeyMerge merges original authorized_keys with ephemeral key
|
||||
func (p *Pipeline) runSSHKeyMerge(ctx context.Context) error {
|
||||
// First, backup current authorized_keys
|
||||
backupCmd := "cp /root/.ssh/authorized_keys /root/.ssh/authorized_keys.ephemeral 2>/dev/null || true"
|
||||
p.remoteCmd(ctx, backupCmd)
|
||||
|
||||
// Check if we have original keys in the restored /root
|
||||
checkCmd := "cat /root/.ssh/authorized_keys.original 2>/dev/null || cat /srv/restore/root/.ssh/authorized_keys 2>/dev/null || echo ''"
|
||||
originalKeys, _ := p.remoteCmdOutput(ctx, checkCmd)
|
||||
|
||||
// Get current (ephemeral) keys
|
||||
currentKeys, _ := p.remoteCmdOutput(ctx, "cat /root/.ssh/authorized_keys 2>/dev/null || echo ''")
|
||||
|
||||
// Merge keys (unique)
|
||||
allKeys := make(map[string]bool)
|
||||
for _, key := range strings.Split(currentKeys, "\n") {
|
||||
key = strings.TrimSpace(key)
|
||||
if key != "" && !strings.HasPrefix(key, "#") {
|
||||
allKeys[key] = true
|
||||
}
|
||||
}
|
||||
for _, key := range strings.Split(originalKeys, "\n") {
|
||||
key = strings.TrimSpace(key)
|
||||
if key != "" && !strings.HasPrefix(key, "#") {
|
||||
allKeys[key] = true
|
||||
}
|
||||
}
|
||||
|
||||
// Write merged keys
|
||||
var mergedKeys []string
|
||||
for key := range allKeys {
|
||||
mergedKeys = append(mergedKeys, key)
|
||||
}
|
||||
|
||||
mergeCmd := fmt.Sprintf("mkdir -p /root/.ssh && echo '%s' > /root/.ssh/authorized_keys && chmod 600 /root/.ssh/authorized_keys",
|
||||
strings.Join(mergedKeys, "\n"))
|
||||
|
||||
return p.remoteCmd(ctx, mergeCmd)
|
||||
}
|
||||
109
internal/restore/stages.go
Normal file
109
internal/restore/stages.go
Normal file
@@ -0,0 +1,109 @@
|
||||
package restore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os/exec"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// /etc whitelist - only these are restored
// Entries are paths relative to /etc: a mix of directories (wireguard,
// nginx, ...) and single files (hostname, passwd, ...). Everything
// else in the staged /etc backup is deliberately left untouched so the
// fresh VM keeps its own system configuration.
var etcWhitelist = []string{
	"wireguard",
	"letsencrypt",
	"nginx",
	"rsyslog-certs",
	"systemd/system",
	"docker",
	"hostname",
	"hosts",
	"passwd",
	"group",
	"shadow",
	"gshadow",
}
|
||||
|
||||
// runSync syncs /root and /opt from backup
// onto the target VM via the configured BackupSource, authenticating
// with the ephemeral SSH key. Stage 1 of the pipeline.
func (p *Pipeline) runSync(ctx context.Context) error {
	dirs := []string{"root", "opt"}
	return p.BackupSource.SyncTo(ctx, p.HostName, p.sshTarget(), p.SSHKeyPath, dirs)
}
|
||||
|
||||
// runEtcStaging stages /etc to /srv/restore/etc
// so that runSelectiveEtc can later copy only whitelisted entries into
// the live /etc. Stage 2 of the pipeline.
//
// NOTE(review): this calls SyncTo with the same arguments as runSync
// apart from dirs; whether "etc" actually lands under /srv/restore (as
// the staging comment implies) depends on SyncTo's destination logic —
// confirm against the BackupSource implementations.
func (p *Pipeline) runEtcStaging(ctx context.Context) error {
	// Create staging directory on target
	if err := p.remoteCmd(ctx, "mkdir -p /srv/restore"); err != nil {
		return fmt.Errorf("failed to create staging dir: %w", err)
	}

	// Sync /etc to staging
	dirs := []string{"etc"}
	return p.BackupSource.SyncTo(ctx, p.HostName, p.sshTarget(), p.SSHKeyPath, dirs)
}
|
||||
|
||||
// runSelectiveEtc copies only whitelisted items from staged /etc
|
||||
func (p *Pipeline) runSelectiveEtc(ctx context.Context) error {
|
||||
for _, item := range etcWhitelist {
|
||||
src := fmt.Sprintf("/srv/restore/etc/%s", item)
|
||||
dst := fmt.Sprintf("/etc/%s", item)
|
||||
|
||||
// Check if source exists
|
||||
checkCmd := fmt.Sprintf("test -e %s && echo exists || echo missing", src)
|
||||
output, err := p.remoteCmdOutput(ctx, checkCmd)
|
||||
if err != nil || strings.TrimSpace(output) == "missing" {
|
||||
if p.Verbose {
|
||||
fmt.Printf(" Skipping %s (not in backup)\n", item)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// Create parent directory if needed
|
||||
parentDir := fmt.Sprintf("/etc/%s", strings.Split(item, "/")[0])
|
||||
if strings.Contains(item, "/") {
|
||||
p.remoteCmd(ctx, fmt.Sprintf("mkdir -p %s", parentDir))
|
||||
}
|
||||
|
||||
// Copy with rsync for proper permissions
|
||||
copyCmd := fmt.Sprintf("rsync -av %s %s", src, dst)
|
||||
if err := p.remoteCmd(ctx, copyCmd); err != nil {
|
||||
return fmt.Errorf("failed to restore %s: %w", item, err)
|
||||
}
|
||||
|
||||
if p.Verbose {
|
||||
fmt.Printf(" Restored %s\n", item)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// remoteCmd runs a command on the target VM
|
||||
func (p *Pipeline) remoteCmd(ctx context.Context, cmd string) error {
|
||||
sshArgs := []string{
|
||||
"-i", p.SSHKeyPath,
|
||||
"-o", "StrictHostKeyChecking=no",
|
||||
"-o", "UserKnownHostsFile=/dev/null",
|
||||
"-o", "ConnectTimeout=10",
|
||||
p.sshTarget(),
|
||||
cmd,
|
||||
}
|
||||
|
||||
sshCmd := exec.CommandContext(ctx, "ssh", sshArgs...)
|
||||
return sshCmd.Run()
|
||||
}
|
||||
|
||||
// remoteCmdOutput runs a command and returns output
|
||||
func (p *Pipeline) remoteCmdOutput(ctx context.Context, cmd string) (string, error) {
|
||||
sshArgs := []string{
|
||||
"-i", p.SSHKeyPath,
|
||||
"-o", "StrictHostKeyChecking=no",
|
||||
"-o", "UserKnownHostsFile=/dev/null",
|
||||
"-o", "ConnectTimeout=10",
|
||||
p.sshTarget(),
|
||||
cmd,
|
||||
}
|
||||
|
||||
sshCmd := exec.CommandContext(ctx, "ssh", sshArgs...)
|
||||
output, err := sshCmd.Output()
|
||||
return string(output), err
|
||||
}
|
||||
63
internal/restore/wireguard.go
Normal file
63
internal/restore/wireguard.go
Normal file
@@ -0,0 +1,63 @@
|
||||
package restore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// runServices starts restored services
|
||||
func (p *Pipeline) runServices(ctx context.Context) error {
|
||||
// Start WireGuard interfaces
|
||||
if err := p.startWireGuard(ctx); err != nil {
|
||||
// WireGuard is optional, log but don't fail
|
||||
if p.Verbose {
|
||||
fmt.Printf(" WireGuard: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Start Docker
|
||||
if err := p.startDocker(ctx); err != nil {
|
||||
return fmt.Errorf("failed to start Docker: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// startWireGuard enables and starts WireGuard interfaces
|
||||
func (p *Pipeline) startWireGuard(ctx context.Context) error {
|
||||
// Check if WireGuard configs exist
|
||||
checkCmd := "ls /etc/wireguard/*.conf 2>/dev/null | head -5"
|
||||
output, err := p.remoteCmdOutput(ctx, checkCmd)
|
||||
if err != nil || strings.TrimSpace(output) == "" {
|
||||
return fmt.Errorf("no WireGuard configs found")
|
||||
}
|
||||
|
||||
// Get interface names
|
||||
configs := strings.Split(strings.TrimSpace(output), "\n")
|
||||
for _, conf := range configs {
|
||||
if conf == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Extract interface name from path (e.g., /etc/wireguard/wg0.conf -> wg0)
|
||||
parts := strings.Split(conf, "/")
|
||||
filename := parts[len(parts)-1]
|
||||
iface := strings.TrimSuffix(filename, ".conf")
|
||||
|
||||
if p.Verbose {
|
||||
fmt.Printf(" Starting WireGuard interface: %s\n", iface)
|
||||
}
|
||||
|
||||
// Enable and start
|
||||
enableCmd := fmt.Sprintf("systemctl enable wg-quick@%s", iface)
|
||||
startCmd := fmt.Sprintf("systemctl start wg-quick@%s", iface)
|
||||
|
||||
p.remoteCmd(ctx, enableCmd)
|
||||
if err := p.remoteCmd(ctx, startCmd); err != nil {
|
||||
return fmt.Errorf("failed to start %s: %w", iface, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
108
internal/ui/dryrun.go
Normal file
108
internal/ui/dryrun.go
Normal file
@@ -0,0 +1,108 @@
|
||||
package ui
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// DryRun records the operations a command would perform without
// executing them, for later display to the user.
type DryRun struct {
	Enabled    bool      // whether dry-run mode is active
	Operations []DryRunOp // operations recorded so far, in order
}

// DryRunOp describes a single operation that would be performed.
type DryRunOp struct {
	Component   string // e.g., "VM", "DNS", "Restore"
	Action      string // e.g., "Create", "Update", "Delete"
	Description string // human-readable detail
}

// NewDryRun creates a tracker; enabled controls whether Print reports.
func NewDryRun(enabled bool) *DryRun {
	d := &DryRun{Enabled: enabled}
	d.Operations = make([]DryRunOp, 0)
	return d
}

// AddOperation appends one operation to the recorded list.
func (d *DryRun) AddOperation(component, action, description string) {
	op := DryRunOp{
		Component:   component,
		Action:      action,
		Description: description,
	}
	d.Operations = append(d.Operations, op)
}

// Print displays every recorded operation. It is a no-op unless the
// tracker was created with enabled=true.
func (d *DryRun) Print() {
	if !d.Enabled {
		return
	}

	fmt.Println("\n" + HeaderLine("DRY RUN - No changes will be made"))
	fmt.Println()

	if len(d.Operations) == 0 {
		fmt.Println("No operations would be performed.")
		return
	}

	for i, op := range d.Operations {
		fmt.Printf("%d. [%s] %s: %s\n", i+1, op.Component, op.Action, op.Description)
	}

	fmt.Println()
	fmt.Println("To execute these operations, run with --yes flag")
}

// HeaderLine formats title as a "=== title ===" section header.
func HeaderLine(title string) string {
	return "=== " + title + " ==="
}
|
||||
|
||||
// TablePrint prints data as a simple table
|
||||
func TablePrint(headers []string, rows [][]string) {
|
||||
// Calculate column widths
|
||||
widths := make([]int, len(headers))
|
||||
for i, h := range headers {
|
||||
widths[i] = len(h)
|
||||
}
|
||||
for _, row := range rows {
|
||||
for i, cell := range row {
|
||||
if i < len(widths) && len(cell) > widths[i] {
|
||||
widths[i] = len(cell)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Print header
|
||||
for i, h := range headers {
|
||||
fmt.Printf("%-*s ", widths[i], h)
|
||||
}
|
||||
fmt.Println()
|
||||
|
||||
// Print separator
|
||||
for i := range headers {
|
||||
fmt.Printf("%s ", repeat("-", widths[i]))
|
||||
}
|
||||
fmt.Println()
|
||||
|
||||
// Print rows
|
||||
for _, row := range rows {
|
||||
for i, cell := range row {
|
||||
if i < len(widths) {
|
||||
fmt.Printf("%-*s ", widths[i], cell)
|
||||
}
|
||||
}
|
||||
fmt.Println()
|
||||
}
|
||||
}
|
||||
|
||||
// repeat returns s concatenated n times; n <= 0 yields "".
// Uses a preallocated buffer instead of the original string "+=" loop,
// which re-copied the accumulated string on every iteration (quadratic
// work for large n).
func repeat(s string, n int) string {
	if n <= 0 {
		return ""
	}
	buf := make([]byte, 0, n*len(s))
	for i := 0; i < n; i++ {
		buf = append(buf, s...)
	}
	return string(buf)
}
|
||||
111
internal/ui/progress.go
Normal file
111
internal/ui/progress.go
Normal file
@@ -0,0 +1,111 @@
|
||||
package ui
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Progress tracks completion of a fixed number of steps and renders a
// textual progress bar on stdout, redrawn in place via carriage return.
type Progress struct {
	Total     int       // total number of steps
	Current   int       // steps completed so far
	StartTime time.Time // when tracking began
	Message   string    // label shown next to the bar
}

// NewProgress creates a tracker for total steps with an initial label;
// the clock starts immediately.
func NewProgress(total int, message string) *Progress {
	p := &Progress{Total: total, Message: message}
	p.StartTime = time.Now()
	return p
}

// Increment advances progress by one step and redraws the bar.
func (p *Progress) Increment() {
	p.Current++
	p.Print()
}

// SetMessage replaces the label shown next to the bar.
func (p *Progress) SetMessage(msg string) {
	p.Message = msg
}

// Print redraws the progress line in place (no trailing newline).
func (p *Progress) Print() {
	pct := float64(p.Current) / float64(p.Total) * 100
	fmt.Printf("\r[%s] %.1f%% (%d/%d) %s - %s ",
		p.bar(20), pct, p.Current, p.Total,
		time.Since(p.StartTime).Round(time.Second), p.Message)
}

// Complete forces the bar to full and prints the final line with a
// trailing newline.
func (p *Progress) Complete() {
	p.Current = p.Total
	fmt.Printf("\r[%s] 100%% (%d/%d) %s - Complete\n",
		p.bar(20), p.Total, p.Total,
		time.Since(p.StartTime).Round(time.Second))
}

// bar renders the filled/empty cells for a bar of the given width.
func (p *Progress) bar(width int) string {
	filled := int(float64(p.Current) / float64(p.Total) * float64(width))
	return strings.Repeat("█", filled) + strings.Repeat("░", width-filled)
}
|
||||
|
||||
// Spinner shows an animated busy indicator on stdout while a long
// operation runs. Start launches the animation goroutine; Stop or
// StopWithMessage must be called exactly once afterwards to end it.
type Spinner struct {
	Message string
	stop    chan bool // signals the animation goroutine to exit
	done    chan bool // animation goroutine acknowledges it has exited
}

// NewSpinner creates a spinner with the given label (not yet running).
func NewSpinner(message string) *Spinner {
	return &Spinner{
		Message: message,
		stop:    make(chan bool),
		done:    make(chan bool),
	}
}

// Start begins the spinner animation in a background goroutine,
// redrawing a braille frame every 100ms until stopped.
func (s *Spinner) Start() {
	frames := []string{"⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"}
	go func() {
		for i := 0; ; i++ {
			select {
			case <-s.stop:
				s.done <- true
				return
			default:
			}
			fmt.Printf("\r%s %s", frames[i%len(frames)], s.Message)
			time.Sleep(100 * time.Millisecond)
		}
	}()
}

// Stop halts the animation and blanks the spinner line.
func (s *Spinner) Stop() {
	s.stop <- true
	<-s.done
	fmt.Printf("\r%s\n", strings.Repeat(" ", len(s.Message)+5))
}

// StopWithMessage halts the animation and prints msg in its place.
func (s *Spinner) StopWithMessage(msg string) {
	s.stop <- true
	<-s.done
	fmt.Printf("\r%s\n", msg)
}
|
||||
100
internal/ui/prompts.go
Normal file
100
internal/ui/prompts.go
Normal file
@@ -0,0 +1,100 @@
|
||||
package ui
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ConfirmAction prompts with "[y/N]" and reads one line from stdin.
// Returns true only for "y" or "yes" (case-insensitive, trimmed);
// any read error — including EOF — counts as a "no".
func ConfirmAction(message string) bool {
	fmt.Printf("%s [y/N]: ", message)

	line, err := bufio.NewReader(os.Stdin).ReadString('\n')
	if err != nil {
		return false
	}

	switch strings.TrimSpace(strings.ToLower(line)) {
	case "y", "yes":
		return true
	}
	return false
}
|
||||
|
||||
// ConfirmHostname guards a destructive DNS change: the user must type
// the hostname exactly (surrounding whitespace ignored). A read error
// counts as refusal.
func ConfirmHostname(hostname string) bool {
	fmt.Printf("\n⚠️ DESTRUCTIVE OPERATION ⚠️\n")
	fmt.Printf("This will modify DNS for: %s\n", hostname)
	fmt.Printf("Type the hostname exactly to confirm: ")

	line, err := bufio.NewReader(os.Stdin).ReadString('\n')
	if err != nil {
		return false
	}

	return strings.TrimSpace(line) == hostname
}
|
||||
|
||||
// ConfirmRecovery summarizes a pending recovery (host, backup source,
// target provider) and requires the user to type the literal word
// "RECOVER" to proceed. A read error counts as refusal.
func ConfirmRecovery(host, source, target string) bool {
	fmt.Printf("\n=== RECOVERY CONFIRMATION ===\n")
	fmt.Printf("Host: %s\n", host)
	fmt.Printf("Source: %s\n", source)
	fmt.Printf("Target: %s\n", target)
	fmt.Printf("\nThis will create a new VM and restore data.\n")
	fmt.Printf("Type 'RECOVER' to proceed: ")

	line, err := bufio.NewReader(os.Stdin).ReadString('\n')
	if err != nil {
		return false
	}

	return strings.TrimSpace(line) == "RECOVER"
}
|
||||
|
||||
// SelectOption prints a numbered menu (1-based) and reads the user's
// choice from stdin. Returns the zero-based index of the selection, or
// an error for unreadable input or an out-of-range number.
func SelectOption(prompt string, options []string) (int, error) {
	fmt.Println(prompt)
	for i, opt := range options {
		fmt.Printf(" %d. %s\n", i+1, opt)
	}
	fmt.Print("Selection: ")

	in := bufio.NewReader(os.Stdin)
	var choice int
	if _, err := fmt.Fscanf(in, "%d\n", &choice); err != nil {
		return -1, err
	}

	if choice < 1 || choice > len(options) {
		return -1, fmt.Errorf("invalid selection")
	}

	return choice - 1, nil
}
|
||||
|
||||
// ansi prints a formatted message wrapped in the given ANSI color code,
// prefixed and followed by a newline. Shared by the Print* helpers.
func ansi(code, prefix, format string, args ...interface{}) {
	fmt.Printf("\033["+code+"m"+prefix+format+"\033[0m\n", args...)
}

// PrintError prints an error message in red.
func PrintError(format string, args ...interface{}) {
	ansi("31", "Error: ", format, args...)
}

// PrintSuccess prints a success message in green.
func PrintSuccess(format string, args ...interface{}) {
	ansi("32", "✓ ", format, args...)
}

// PrintWarning prints a warning message in yellow.
func PrintWarning(format string, args ...interface{}) {
	ansi("33", "⚠ ", format, args...)
}

// PrintInfo prints an info message in blue.
func PrintInfo(format string, args ...interface{}) {
	ansi("34", "ℹ ", format, args...)
}
|
||||
Reference in New Issue
Block a user