From 1108bf3ef68648c67019ade29b47ef06e8267d0c Mon Sep 17 00:00:00 2001
From: GuilhermeStrice <15857393+GuilhermeStrice@users.noreply.github.com>
Date: Wed, 9 Jul 2025 19:24:12 +0100
Subject: [PATCH] add project
---
DatabaseSnapshots.csproj | 22 +
Models/Configuration.cs | 143 +++
Models/ConfigurationValidation.cs | 252 ++++
Models/DataModels.cs | 242 ++++
Models/InputValidation.cs | 438 +++++++
Program.cs | 1087 +++++++++++++++++
README.md | 297 +++++
Services/BinlogReader.cs | 345 ++++++
Services/EncryptionService.cs | 324 +++++
Services/EventStore.cs | 140 +++
Services/OptimizedFileService.cs | 172 +++
Services/RecoveryService.cs | 599 +++++++++
Services/SnapshotService.cs | 633 ++++++++++
appsettings.test.json | 49 +
events_test/events_1751237015.json | 0
events_test/events_1751237031.json | 0
.../test_incremental_restore.json | 9 +
17 files changed, 4752 insertions(+)
create mode 100644 DatabaseSnapshots.csproj
create mode 100644 Models/Configuration.cs
create mode 100644 Models/ConfigurationValidation.cs
create mode 100644 Models/DataModels.cs
create mode 100644 Models/InputValidation.cs
create mode 100644 Program.cs
create mode 100644 Services/BinlogReader.cs
create mode 100644 Services/EncryptionService.cs
create mode 100644 Services/EventStore.cs
create mode 100644 Services/OptimizedFileService.cs
create mode 100644 Services/RecoveryService.cs
create mode 100644 Services/SnapshotService.cs
create mode 100644 appsettings.test.json
create mode 100644 events_test/events_1751237015.json
create mode 100644 events_test/events_1751237031.json
create mode 100644 events_test/recovery_points/test_incremental_restore.json
diff --git a/DatabaseSnapshots.csproj b/DatabaseSnapshots.csproj
new file mode 100644
index 0000000..06799f1
--- /dev/null
+++ b/DatabaseSnapshots.csproj
@@ -0,0 +1,22 @@
+<Project Sdk="Microsoft.NET.Sdk">
+
+  <PropertyGroup>
+    <OutputType>Exe</OutputType>
+    <TargetFramework>net9.0</TargetFramework>
+    <ImplicitUsings>enable</ImplicitUsings>
+    <Nullable>enable</Nullable>
+  </PropertyGroup>
+
+  <ItemGroup>
+    <PackageReference Include="CommandLineParser" Version="2.9.1" />
+    <PackageReference Include="Crayon" Version="2.0.69" />
+    <PackageReference Include="MySqlConnector" Version="2.3.7" />
+    <PackageReference Include="Microsoft.Extensions.Configuration" Version="9.0.0" />
+    <PackageReference Include="Microsoft.Extensions.Configuration.Json" Version="9.0.0" />
+    <PackageReference Include="Microsoft.Extensions.Configuration.Binder" Version="9.0.0" />
+    <PackageReference Include="Microsoft.Extensions.Configuration.EnvironmentVariables" Version="9.0.0" />
+    <PackageReference Include="System.Text.Json" Version="9.0.0" />
+  </ItemGroup>
+
+
+</Project>
diff --git a/Models/Configuration.cs b/Models/Configuration.cs
new file mode 100644
index 0000000..8c4d084
--- /dev/null
+++ b/Models/Configuration.cs
@@ -0,0 +1,143 @@
+using System.Text.Json.Serialization;
+
+namespace DatabaseSnapshotsService.Models
+{
+ public class SnapshotConfiguration
+ {
+ [JsonPropertyName("connectionString")]
+ public string ConnectionString { get; set; } = "Server=localhost;Database=trading;Uid=root;Pwd=password;";
+
+ [JsonPropertyName("binlogReader")]
+ public BinlogReaderConfig BinlogReader { get; set; } = new();
+
+ [JsonPropertyName("snapshotStorage")]
+ public SnapshotStorageConfig SnapshotStorage { get; set; } = new();
+
+ [JsonPropertyName("eventStore")]
+ public EventStoreConfig EventStore { get; set; } = new();
+
+ [JsonPropertyName("security")]
+ public SecurityConfig Security { get; set; } = new();
+ }
+
+ public class SecurityConfig
+ {
+ [JsonPropertyName("encryption")]
+ public bool Encryption { get; set; } = false;
+
+ [JsonPropertyName("encryptionKey")]
+ public string? EncryptionKey { get; set; }
+ }
+
+ public class BinlogReaderConfig
+ {
+ [JsonPropertyName("host")]
+ public string Host { get; set; } = "localhost";
+
+ [JsonPropertyName("port")]
+ public int Port { get; set; } = 3306;
+
+ [JsonPropertyName("username")]
+ public string Username { get; set; } = "binlog_reader";
+
+ [JsonPropertyName("password")]
+ public string Password { get; set; } = "secure_password";
+
+ [JsonPropertyName("serverId")]
+ public int ServerId { get; set; } = 999;
+
+ [JsonPropertyName("startPosition")]
+ public long StartPosition { get; set; } = 4;
+
+ [JsonPropertyName("heartbeatInterval")]
+ public int HeartbeatInterval { get; set; } = 30;
+ }
+
+ public class SnapshotStorageConfig
+ {
+ [JsonPropertyName("path")]
+ public string Path { get; set; } = "./snapshots";
+
+ [JsonPropertyName("compression")]
+ public bool Compression { get; set; } = true;
+
+ [JsonPropertyName("retentionDays")]
+ public int RetentionDays { get; set; } = 30;
+
+ [JsonPropertyName("maxFileSize")]
+ public long MaxFileSize { get; set; } = 100 * 1024 * 1024; // 100MB
+
+ [JsonPropertyName("dumpOptimizations")]
+ public DumpOptimizationConfig DumpOptimizations { get; set; } = new();
+ }
+
+ public class DumpOptimizationConfig
+ {
+ [JsonPropertyName("singleTransaction")]
+ public bool SingleTransaction { get; set; } = true;
+
+ [JsonPropertyName("includeRoutines")]
+ public bool IncludeRoutines { get; set; } = true;
+
+ [JsonPropertyName("includeTriggers")]
+ public bool IncludeTriggers { get; set; } = true;
+
+ [JsonPropertyName("includeEvents")]
+ public bool IncludeEvents { get; set; } = true;
+
+ [JsonPropertyName("extendedInsert")]
+ public bool ExtendedInsert { get; set; } = true;
+
+ [JsonPropertyName("completeInsert")]
+ public bool CompleteInsert { get; set; } = true;
+
+ [JsonPropertyName("hexBlob")]
+ public bool HexBlob { get; set; } = true;
+
+ [JsonPropertyName("netBufferLength")]
+ public int NetBufferLength { get; set; } = 16384;
+
+ [JsonPropertyName("maxAllowedPacket")]
+ public string MaxAllowedPacket { get; set; } = "1G";
+
+ [JsonPropertyName("excludeTables")]
+        public List<string> ExcludeTables { get; set; } = new();
+
+        [JsonPropertyName("includeTables")]
+        public List<string> IncludeTables { get; set; } = new();
+
+ // New options
+ [JsonPropertyName("quick")]
+ public bool Quick { get; set; } = true;
+ [JsonPropertyName("orderByPrimary")]
+ public bool OrderByPrimary { get; set; } = true;
+ [JsonPropertyName("flushLogs")]
+ public bool FlushLogs { get; set; } = true;
+ [JsonPropertyName("masterData")]
+ public int MasterData { get; set; } = 2;
+ [JsonPropertyName("compact")]
+ public bool Compact { get; set; } = false;
+ [JsonPropertyName("noAutocommit")]
+ public bool NoAutocommit { get; set; } = false;
+ [JsonPropertyName("lockTables")]
+ public bool LockTables { get; set; } = false;
+ }
+
+ public class EventStoreConfig
+ {
+ [JsonPropertyName("path")]
+ public string Path { get; set; } = "./events";
+
+ [JsonPropertyName("maxFileSize")]
+ public long MaxFileSize { get; set; } = 50 * 1024 * 1024; // 50MB
+
+ [JsonPropertyName("retentionDays")]
+ public int RetentionDays { get; set; } = 90;
+
+ [JsonPropertyName("batchSize")]
+ public int BatchSize { get; set; } = 1000;
+
+ [JsonPropertyName("flushInterval")]
+ public int FlushInterval { get; set; } = 5; // seconds
+ }
+}
\ No newline at end of file
diff --git a/Models/ConfigurationValidation.cs b/Models/ConfigurationValidation.cs
new file mode 100644
index 0000000..814ca02
--- /dev/null
+++ b/Models/ConfigurationValidation.cs
@@ -0,0 +1,252 @@
+using System.ComponentModel.DataAnnotations;
+using System.Text.RegularExpressions;
+
+namespace DatabaseSnapshotsService.Models
+{
+ public static class ConfigurationValidation
+ {
+        public static List<ValidationResult> ValidateConfiguration(SnapshotConfiguration config)
+        {
+            var errors = new List<ValidationResult>();
+
+ if (config == null)
+ {
+ errors.Add(new ValidationResult("Configuration cannot be null"));
+ return errors;
+ }
+
+ ValidateConnectionString(config.ConnectionString, errors);
+ ValidateBinlogReader(config.BinlogReader, errors);
+ ValidateSnapshotStorage(config.SnapshotStorage, errors);
+ ValidateEventStore(config.EventStore, errors);
+ ValidateSecurity(config.Security, errors);
+
+ return errors;
+ }
+
+        private static void ValidateConnectionString(string connectionString, List<ValidationResult> errors)
+ {
+ if (string.IsNullOrWhiteSpace(connectionString))
+ {
+ errors.Add(new ValidationResult("Connection string cannot be empty"));
+ return;
+ }
+
+ // Basic connection string validation
+ var requiredParams = new[] { "Server", "Database", "Uid", "Pwd" };
+ foreach (var param in requiredParams)
+ {
+ if (!connectionString.Contains($"{param}="))
+ {
+ errors.Add(new ValidationResult($"Connection string must contain {param} parameter"));
+ }
+ }
+
+ // Check for potentially dangerous patterns
+ var dangerousPatterns = new[]
+ {
+ @"--.*", // SQL comments
+ @";\s*DROP\s+", // DROP statements
+ @";\s*DELETE\s+", // DELETE statements
+ @";\s*TRUNCATE\s+", // TRUNCATE statements
+ @";\s*ALTER\s+", // ALTER statements
+ @";\s*CREATE\s+", // CREATE statements
+ };
+
+ foreach (var pattern in dangerousPatterns)
+ {
+ if (Regex.IsMatch(connectionString, pattern, RegexOptions.IgnoreCase))
+ {
+ errors.Add(new ValidationResult($"Connection string contains potentially dangerous SQL pattern: {pattern}"));
+ }
+ }
+ }
+
+        private static void ValidateBinlogReader(BinlogReaderConfig config, List<ValidationResult> errors)
+ {
+ if (string.IsNullOrWhiteSpace(config.Host))
+ {
+ errors.Add(new ValidationResult("Binlog reader host cannot be empty"));
+ }
+
+ if (config.Port < 1 || config.Port > 65535)
+ {
+ errors.Add(new ValidationResult("Binlog reader port must be between 1 and 65535"));
+ }
+
+ if (string.IsNullOrWhiteSpace(config.Username))
+ {
+ errors.Add(new ValidationResult("Binlog reader username cannot be empty"));
+ }
+
+ if (string.IsNullOrWhiteSpace(config.Password))
+ {
+ errors.Add(new ValidationResult("Binlog reader password cannot be empty"));
+ }
+
+ if (config.ServerId < 1 || config.ServerId > 4294967295)
+ {
+ errors.Add(new ValidationResult("Binlog reader server ID must be between 1 and 4294967295"));
+ }
+
+ if (config.StartPosition < 4)
+ {
+ errors.Add(new ValidationResult("Binlog reader start position must be at least 4"));
+ }
+
+ if (config.HeartbeatInterval < 1 || config.HeartbeatInterval > 3600)
+ {
+ errors.Add(new ValidationResult("Binlog reader heartbeat interval must be between 1 and 3600 seconds"));
+ }
+ }
+
+        private static void ValidateSnapshotStorage(SnapshotStorageConfig config, List<ValidationResult> errors)
+ {
+ if (string.IsNullOrWhiteSpace(config.Path))
+ {
+ errors.Add(new ValidationResult("Snapshot storage path cannot be empty"));
+ }
+ else
+ {
+ // Check for path traversal attempts
+ var normalizedPath = Path.GetFullPath(config.Path);
+ if (normalizedPath.Contains("..") || normalizedPath.Contains("~"))
+ {
+ errors.Add(new ValidationResult("Snapshot storage path contains invalid characters"));
+ }
+ }
+
+ if (config.RetentionDays < 1 || config.RetentionDays > 3650) // Max 10 years
+ {
+ errors.Add(new ValidationResult("Snapshot retention days must be between 1 and 3650"));
+ }
+
+ if (config.MaxFileSize < 1024 * 1024 || config.MaxFileSize > 10L * 1024 * 1024 * 1024) // 1MB to 10GB
+ {
+ errors.Add(new ValidationResult("Snapshot max file size must be between 1MB and 10GB"));
+ }
+
+ // Validate dump optimizations
+ ValidateDumpOptimizations(config.DumpOptimizations, errors);
+ }
+
+        private static void ValidateDumpOptimizations(DumpOptimizationConfig config, List<ValidationResult> errors)
+ {
+ if (config.NetBufferLength < 1024 || config.NetBufferLength > 1048576) // 1KB to 1MB
+ {
+ errors.Add(new ValidationResult("Net buffer length must be between 1024 and 1048576 bytes"));
+ }
+
+ // Validate max allowed packet format (e.g., "1G", "512M", "1024K")
+ if (!string.IsNullOrWhiteSpace(config.MaxAllowedPacket))
+ {
+ var packetPattern = @"^\d+[KMGT]?$";
+ if (!Regex.IsMatch(config.MaxAllowedPacket, packetPattern))
+ {
+ errors.Add(new ValidationResult("Max allowed packet must be in format: number[K|M|G|T] (e.g., '1G', '512M')"));
+ }
+ }
+
+ // Validate table names in exclude/include lists
+ foreach (var table in config.ExcludeTables.Concat(config.IncludeTables))
+ {
+ if (string.IsNullOrWhiteSpace(table))
+ {
+ errors.Add(new ValidationResult("Table names cannot be empty"));
+ }
+ else if (!Regex.IsMatch(table, @"^[a-zA-Z_][a-zA-Z0-9_]*$"))
+ {
+ errors.Add(new ValidationResult($"Invalid table name format: {table}"));
+ }
+ }
+
+ // Check for conflicts between include and exclude tables
+ var conflicts = config.IncludeTables.Intersect(config.ExcludeTables, StringComparer.OrdinalIgnoreCase);
+ if (conflicts.Any())
+ {
+ errors.Add(new ValidationResult($"Table(s) cannot be both included and excluded: {string.Join(", ", conflicts)}"));
+ }
+ }
+
+        private static void ValidateEventStore(EventStoreConfig config, List<ValidationResult> errors)
+ {
+ if (string.IsNullOrWhiteSpace(config.Path))
+ {
+ errors.Add(new ValidationResult("Event store path cannot be empty"));
+ }
+ else
+ {
+ // Check for path traversal attempts
+ var normalizedPath = Path.GetFullPath(config.Path);
+ if (normalizedPath.Contains("..") || normalizedPath.Contains("~"))
+ {
+ errors.Add(new ValidationResult("Event store path contains invalid characters"));
+ }
+ }
+
+ if (config.MaxFileSize < 1024 * 1024 || config.MaxFileSize > 5L * 1024 * 1024 * 1024) // 1MB to 5GB
+ {
+ errors.Add(new ValidationResult("Event store max file size must be between 1MB and 5GB"));
+ }
+
+ if (config.RetentionDays < 1 || config.RetentionDays > 3650) // Max 10 years
+ {
+ errors.Add(new ValidationResult("Event store retention days must be between 1 and 3650"));
+ }
+
+ if (config.BatchSize < 1 || config.BatchSize > 10000)
+ {
+ errors.Add(new ValidationResult("Event store batch size must be between 1 and 10000"));
+ }
+
+ if (config.FlushInterval < 1 || config.FlushInterval > 300) // 1 second to 5 minutes
+ {
+ errors.Add(new ValidationResult("Event store flush interval must be between 1 and 300 seconds"));
+ }
+ }
+
+        private static void ValidateSecurity(SecurityConfig config, List<ValidationResult> errors)
+ {
+ if (config.Encryption && string.IsNullOrWhiteSpace(config.EncryptionKey))
+ {
+ errors.Add(new ValidationResult("Encryption key is required when encryption is enabled"));
+ }
+
+ if (!string.IsNullOrWhiteSpace(config.EncryptionKey))
+ {
+ if (config.EncryptionKey.Length < 32)
+ {
+ errors.Add(new ValidationResult("Encryption key must be at least 32 characters long"));
+ }
+
+ // Check for weak encryption keys
+ if (IsWeakEncryptionKey(config.EncryptionKey))
+ {
+ errors.Add(new ValidationResult("Encryption key is too weak. Use a stronger key with mixed characters"));
+ }
+ }
+ }
+
+ private static bool IsWeakEncryptionKey(string key)
+ {
+ // Check for common weak patterns
+ if (key.Length < 32) return true;
+
+ // Check for repeated characters
+ if (Regex.IsMatch(key, @"(.)\1{10,}")) return true;
+
+ // Check for sequential characters
+ if (Regex.IsMatch(key, @"(?:abc|bcd|cde|def|efg|fgh|ghi|hij|ijk|jkl|klm|lmn|mno|nop|opq|pqr|qrs|rst|stu|tuv|uvw|vwx|wxy|xyz)", RegexOptions.IgnoreCase)) return true;
+
+ // Check for common weak keys
+ var weakKeys = new[]
+ {
+ "password", "123456", "qwerty", "admin", "root", "secret", "key",
+ "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
+ "12345678901234567890123456789012"
+ };
+
+ return weakKeys.Any(wk => key.Equals(wk, StringComparison.OrdinalIgnoreCase));
+ }
+ }
+}
\ No newline at end of file
diff --git a/Models/DataModels.cs b/Models/DataModels.cs
new file mode 100644
index 0000000..6ee14a2
--- /dev/null
+++ b/Models/DataModels.cs
@@ -0,0 +1,242 @@
+using System.Text.Json.Serialization;
+
+namespace DatabaseSnapshotsService.Models
+{
+ public class SnapshotInfo
+ {
+ [JsonPropertyName("id")]
+ public int Id { get; set; }
+
+ [JsonPropertyName("type")]
+ public string Type { get; set; } = string.Empty;
+
+ [JsonPropertyName("timestamp")]
+ public long Timestamp { get; set; }
+
+ [JsonPropertyName("dataSize")]
+ public long DataSize { get; set; }
+
+ [JsonPropertyName("status")]
+ public string Status { get; set; } = string.Empty;
+
+ [JsonPropertyName("description")]
+ public string? Description { get; set; }
+
+ [JsonPropertyName("userId")]
+ public int? UserId { get; set; }
+
+ [JsonPropertyName("createdAt")]
+ public DateTime CreatedAt { get; set; }
+
+ [JsonPropertyName("filePath")]
+ public string FilePath { get; set; } = string.Empty;
+
+ [JsonPropertyName("checksum")]
+ public string Checksum { get; set; } = string.Empty;
+
+ // Binlog fields for incremental snapshots
+ [JsonPropertyName("binlogFile")]
+ public string? BinlogFile { get; set; }
+
+ [JsonPropertyName("binlogPosition")]
+ public long? BinlogPosition { get; set; }
+
+ [JsonPropertyName("parentSnapshotId")]
+ public int? ParentSnapshotId { get; set; }
+
+ [JsonPropertyName("incrementalBinlogStartFile")]
+ public string? IncrementalBinlogStartFile { get; set; }
+
+ [JsonPropertyName("incrementalBinlogStartPosition")]
+ public long? IncrementalBinlogStartPosition { get; set; }
+
+ [JsonPropertyName("incrementalBinlogEndFile")]
+ public string? IncrementalBinlogEndFile { get; set; }
+
+ [JsonPropertyName("incrementalBinlogEndPosition")]
+ public long? IncrementalBinlogEndPosition { get; set; }
+ }
+
+ public class DatabaseEvent
+ {
+ [JsonPropertyName("id")]
+ public long Id { get; set; }
+
+ [JsonPropertyName("timestamp")]
+ public long Timestamp { get; set; }
+
+ [JsonPropertyName("type")]
+ public string Type { get; set; } = string.Empty;
+
+ [JsonPropertyName("table")]
+ public string Table { get; set; } = string.Empty;
+
+ [JsonPropertyName("operation")]
+ public string Operation { get; set; } = string.Empty;
+
+ [JsonPropertyName("data")]
+ public string Data { get; set; } = string.Empty;
+
+ [JsonPropertyName("binlogPosition")]
+ public long BinlogPosition { get; set; }
+
+ [JsonPropertyName("serverId")]
+ public int ServerId { get; set; }
+
+ [JsonPropertyName("checksum")]
+ public string Checksum { get; set; } = string.Empty;
+ }
+
+ public class RecoveryPoint
+ {
+ [JsonPropertyName("id")]
+ public int Id { get; set; }
+
+ [JsonPropertyName("name")]
+ public string Name { get; set; } = string.Empty;
+
+ [JsonPropertyName("timestamp")]
+ public long Timestamp { get; set; }
+
+ [JsonPropertyName("description")]
+ public string? Description { get; set; }
+
+ [JsonPropertyName("eventCount")]
+ public long EventCount { get; set; }
+
+ [JsonPropertyName("createdAt")]
+ public DateTime CreatedAt { get; set; }
+
+ [JsonPropertyName("lastEventId")]
+ public long LastEventId { get; set; }
+ }
+
+ public class ServiceStatus
+ {
+ [JsonPropertyName("status")]
+ public string Status { get; set; } = "Unknown";
+
+ [JsonPropertyName("databaseConnected")]
+ public bool DatabaseConnected { get; set; }
+
+ [JsonPropertyName("binlogReaderStatus")]
+ public string BinlogReaderStatus { get; set; } = "Unknown";
+
+ [JsonPropertyName("lastEventProcessed")]
+ public long LastEventProcessed { get; set; }
+
+ [JsonPropertyName("totalEvents")]
+ public long TotalEvents { get; set; }
+
+ [JsonPropertyName("activeSnapshots")]
+ public int ActiveSnapshots { get; set; }
+
+ [JsonPropertyName("uptime")]
+ public TimeSpan Uptime { get; set; }
+
+ [JsonPropertyName("lastSnapshot")]
+ public DateTime? LastSnapshot { get; set; }
+ }
+
+ public class HealthStatus
+ {
+ [JsonPropertyName("isHealthy")]
+ public bool IsHealthy { get; set; }
+
+ [JsonPropertyName("errorMessage")]
+ public string? ErrorMessage { get; set; }
+
+ [JsonPropertyName("checks")]
+        public Dictionary<string, bool> Checks { get; set; } = new();
+
+ [JsonPropertyName("timestamp")]
+ public DateTime Timestamp { get; set; } = DateTime.UtcNow;
+ }
+
+ public class RestorePreview
+ {
+ [JsonPropertyName("targetTimestamp")]
+ public long TargetTimestamp { get; set; }
+
+ [JsonPropertyName("eventCount")]
+ public long EventCount { get; set; }
+
+ [JsonPropertyName("affectedTables")]
+        public List<string> AffectedTables { get; set; } = new();
+
+ [JsonPropertyName("estimatedDuration")]
+ public TimeSpan EstimatedDuration { get; set; }
+
+ [JsonPropertyName("snapshotId")]
+ public int? SnapshotId { get; set; }
+
+ [JsonPropertyName("warnings")]
+        public List<string> Warnings { get; set; } = new();
+ }
+
+ public class SnapshotMetadata
+ {
+ [JsonPropertyName("version")]
+ public string Version { get; set; } = "1.0";
+
+ [JsonPropertyName("createdAt")]
+ public DateTime CreatedAt { get; set; }
+
+ [JsonPropertyName("databaseVersion")]
+ public string DatabaseVersion { get; set; } = string.Empty;
+
+ [JsonPropertyName("tables")]
+        public List<TableInfo> Tables { get; set; } = new();
+
+ [JsonPropertyName("checksum")]
+ public string Checksum { get; set; } = string.Empty;
+
+ [JsonPropertyName("compression")]
+ public bool Compression { get; set; }
+
+ [JsonPropertyName("encryption")]
+ public bool Encryption { get; set; }
+ }
+
+ public class TableInfo
+ {
+ [JsonPropertyName("name")]
+ public string Name { get; set; } = string.Empty;
+
+ [JsonPropertyName("rowCount")]
+ public long RowCount { get; set; }
+
+ [JsonPropertyName("dataSize")]
+ public long DataSize { get; set; }
+
+ [JsonPropertyName("indexSize")]
+ public long IndexSize { get; set; }
+
+ [JsonPropertyName("checksum")]
+ public string Checksum { get; set; } = string.Empty;
+ }
+
+ public enum SnapshotType
+ {
+ Full,
+ Trading,
+ User,
+ Incremental
+ }
+
+ public enum EventOperation
+ {
+ Insert,
+ Update,
+ Delete,
+ Truncate
+ }
+
+ public enum SnapshotStatus
+ {
+ Creating,
+ Completed,
+ Failed,
+ Corrupted
+ }
+}
\ No newline at end of file
diff --git a/Models/InputValidation.cs b/Models/InputValidation.cs
new file mode 100644
index 0000000..aa8ff56
--- /dev/null
+++ b/Models/InputValidation.cs
@@ -0,0 +1,438 @@
+using System.ComponentModel.DataAnnotations;
+using System.Text.RegularExpressions;
+
+namespace DatabaseSnapshotsService.Models
+{
+ public static class InputValidation
+ {
+ // Validation patterns
+ private static readonly Regex ValidFileNamePattern = new(@"^[a-zA-Z0-9._-]+$", RegexOptions.Compiled);
+ private static readonly Regex ValidPathPattern = new(@"^[a-zA-Z0-9/._-]+$", RegexOptions.Compiled);
+ private static readonly Regex ValidNamePattern = new(@"^[a-zA-Z0-9\s._-]{1,100}$", RegexOptions.Compiled);
+ private static readonly Regex ValidDescriptionPattern = new(@"^[a-zA-Z0-9\s.,!?@#$%^&*()_+-=:;'""<>/\\|`~]{0,500}$", RegexOptions.Compiled);
+ private static readonly Regex ValidTableNamePattern = new(@"^[a-zA-Z][a-zA-Z0-9_]*$", RegexOptions.Compiled);
+ private static readonly Regex ValidOperationPattern = new(@"^(insert|update|delete|truncate|query)$", RegexOptions.IgnoreCase | RegexOptions.Compiled);
+ private static readonly Regex ValidEventTypePattern = new(@"^(binlog|status|error|info)$", RegexOptions.IgnoreCase | RegexOptions.Compiled);
+
+ // Dangerous patterns to detect
+ private static readonly Regex[] DangerousPatterns = {
+ new(@"\.\./", RegexOptions.Compiled), // Path traversal
+ new(@"\.\.\\", RegexOptions.Compiled), // Windows path traversal
+ new(@"[<>""'&]", RegexOptions.Compiled), // HTML/XML injection
+ new(@"(union|select|insert|update|delete|drop|create|alter|exec|execute|script|javascript|vbscript|onload|onerror)", RegexOptions.IgnoreCase | RegexOptions.Compiled), // SQL/script injection
+ new(@"(\x00|\x01|\x02|\x03|\x04|\x05|\x06|\x07|\x08|\x0B|\x0C|\x0E|\x0F|\x10|\x11|\x12|\x13|\x14|\x15|\x16|\x17|\x18|\x19|\x1A|\x1B|\x1C|\x1D|\x1E|\x1F)", RegexOptions.Compiled), // Control characters
+ };
+
+ public static class SnapshotValidation
+ {
+ public static ValidationResult ValidateSnapshotName(string name)
+ {
+ if (string.IsNullOrWhiteSpace(name))
+ {
+ return new ValidationResult("Snapshot name cannot be empty");
+ }
+
+ if (name.Length > 100)
+ {
+ return new ValidationResult("Snapshot name cannot exceed 100 characters");
+ }
+
+ if (!ValidNamePattern.IsMatch(name))
+ {
+ return new ValidationResult("Snapshot name contains invalid characters. Use only letters, numbers, spaces, dots, underscores, and hyphens");
+ }
+
+ if (ContainsDangerousPatterns(name))
+ {
+ return new ValidationResult("Snapshot name contains potentially dangerous content");
+ }
+
+ return ValidationResult.Success!;
+ }
+
+ public static ValidationResult ValidateSnapshotDescription(string? description)
+ {
+ if (string.IsNullOrWhiteSpace(description))
+ {
+ return ValidationResult.Success!; // Description is optional
+ }
+
+ if (description.Length > 500)
+ {
+ return new ValidationResult("Snapshot description cannot exceed 500 characters");
+ }
+
+ if (!ValidDescriptionPattern.IsMatch(description))
+ {
+ return new ValidationResult("Snapshot description contains invalid characters");
+ }
+
+ if (ContainsDangerousPatterns(description))
+ {
+ return new ValidationResult("Snapshot description contains potentially dangerous content");
+ }
+
+ return ValidationResult.Success!;
+ }
+
+ public static ValidationResult ValidateSnapshotId(int id)
+ {
+ if (id <= 0)
+ {
+ return new ValidationResult("Snapshot ID must be a positive integer");
+ }
+
+ if (id > int.MaxValue)
+ {
+ return new ValidationResult("Snapshot ID is too large");
+ }
+
+ return ValidationResult.Success!;
+ }
+
+ public static ValidationResult ValidateSnapshotType(string type)
+ {
+ if (string.IsNullOrWhiteSpace(type))
+ {
+ return new ValidationResult("Snapshot type cannot be empty");
+ }
+
+ var validTypes = new[] { "full", "incremental", "trading", "user" };
+ if (!validTypes.Contains(type.ToLowerInvariant()))
+ {
+ return new ValidationResult($"Invalid snapshot type. Must be one of: {string.Join(", ", validTypes)}");
+ }
+
+ return ValidationResult.Success!;
+ }
+ }
+
+ public static class EventValidation
+ {
+ public static ValidationResult ValidateTableName(string tableName)
+ {
+ if (string.IsNullOrWhiteSpace(tableName))
+ {
+ return new ValidationResult("Table name cannot be empty");
+ }
+
+ if (tableName.Length > 64)
+ {
+ return new ValidationResult("Table name cannot exceed 64 characters");
+ }
+
+ if (!ValidTableNamePattern.IsMatch(tableName))
+ {
+ return new ValidationResult("Table name contains invalid characters. Use only letters, numbers, and underscores, starting with a letter");
+ }
+
+ if (ContainsDangerousPatterns(tableName))
+ {
+ return new ValidationResult("Table name contains potentially dangerous content");
+ }
+
+ return ValidationResult.Success!;
+ }
+
+ public static ValidationResult ValidateOperation(string operation)
+ {
+ if (string.IsNullOrWhiteSpace(operation))
+ {
+ return new ValidationResult("Operation cannot be empty");
+ }
+
+ if (!ValidOperationPattern.IsMatch(operation))
+ {
+ return new ValidationResult("Invalid operation. Must be one of: insert, update, delete, truncate, query");
+ }
+
+ return ValidationResult.Success!;
+ }
+
+ public static ValidationResult ValidateEventType(string eventType)
+ {
+ if (string.IsNullOrWhiteSpace(eventType))
+ {
+ return new ValidationResult("Event type cannot be empty");
+ }
+
+ if (!ValidEventTypePattern.IsMatch(eventType))
+ {
+ return new ValidationResult("Invalid event type. Must be one of: binlog, status, error, info");
+ }
+
+ return ValidationResult.Success!;
+ }
+
+ public static ValidationResult ValidateEventData(string data)
+ {
+ if (string.IsNullOrWhiteSpace(data))
+ {
+ return new ValidationResult("Event data cannot be empty");
+ }
+
+ if (data.Length > 10000) // 10KB limit
+ {
+ return new ValidationResult("Event data cannot exceed 10KB");
+ }
+
+ if (ContainsDangerousPatterns(data))
+ {
+ return new ValidationResult("Event data contains potentially dangerous content");
+ }
+
+ return ValidationResult.Success!;
+ }
+
+ public static ValidationResult ValidateLimit(int limit)
+ {
+ if (limit <= 0)
+ {
+ return new ValidationResult("Limit must be a positive integer");
+ }
+
+ if (limit > 10000)
+ {
+ return new ValidationResult("Limit cannot exceed 10000");
+ }
+
+ return ValidationResult.Success!;
+ }
+
+ public static ValidationResult ValidateTimestamp(long timestamp)
+ {
+ if (timestamp < 0)
+ {
+ return new ValidationResult("Timestamp cannot be negative");
+ }
+
+ var maxTimestamp = DateTimeOffset.MaxValue.ToUnixTimeSeconds();
+ if (timestamp > maxTimestamp)
+ {
+ return new ValidationResult("Timestamp is too far in the future");
+ }
+
+ return ValidationResult.Success!;
+ }
+ }
+
+ public static class RecoveryValidation
+ {
+ public static ValidationResult ValidateRecoveryPointName(string name)
+ {
+ if (string.IsNullOrWhiteSpace(name))
+ {
+ return new ValidationResult("Recovery point name cannot be empty");
+ }
+
+ if (name.Length > 100)
+ {
+ return new ValidationResult("Recovery point name cannot exceed 100 characters");
+ }
+
+ if (!ValidNamePattern.IsMatch(name))
+ {
+ return new ValidationResult("Recovery point name contains invalid characters. Use only letters, numbers, spaces, dots, underscores, and hyphens");
+ }
+
+ if (ContainsDangerousPatterns(name))
+ {
+ return new ValidationResult("Recovery point name contains potentially dangerous content");
+ }
+
+ return ValidationResult.Success!;
+ }
+
+ public static ValidationResult ValidateRecoveryPointDescription(string? description)
+ {
+ if (string.IsNullOrWhiteSpace(description))
+ {
+ return ValidationResult.Success!; // Description is optional
+ }
+
+ if (description.Length > 500)
+ {
+ return new ValidationResult("Recovery point description cannot exceed 500 characters");
+ }
+
+ if (!ValidDescriptionPattern.IsMatch(description))
+ {
+ return new ValidationResult("Recovery point description contains invalid characters");
+ }
+
+ if (ContainsDangerousPatterns(description))
+ {
+ return new ValidationResult("Recovery point description contains potentially dangerous content");
+ }
+
+ return ValidationResult.Success!;
+ }
+ }
+
+ public static class FileValidation
+ {
+ public static ValidationResult ValidateFilePath(string filePath)
+ {
+ if (string.IsNullOrWhiteSpace(filePath))
+ {
+ return new ValidationResult("File path cannot be empty");
+ }
+
+ if (filePath.Length > 260) // Windows path limit
+ {
+ return new ValidationResult("File path cannot exceed 260 characters");
+ }
+
+ if (!ValidPathPattern.IsMatch(filePath))
+ {
+ return new ValidationResult("File path contains invalid characters");
+ }
+
+ if (ContainsDangerousPatterns(filePath))
+ {
+ return new ValidationResult("File path contains potentially dangerous content");
+ }
+
+ // Check for path traversal attempts
+ var normalizedPath = Path.GetFullPath(filePath);
+ if (normalizedPath.Contains("..") || normalizedPath.Contains("~"))
+ {
+ return new ValidationResult("File path contains invalid path traversal characters");
+ }
+
+ return ValidationResult.Success!;
+ }
+
+ public static ValidationResult ValidateFileName(string fileName)
+ {
+ if (string.IsNullOrWhiteSpace(fileName))
+ {
+ return new ValidationResult("File name cannot be empty");
+ }
+
+ if (fileName.Length > 255)
+ {
+ return new ValidationResult("File name cannot exceed 255 characters");
+ }
+
+ if (!ValidFileNamePattern.IsMatch(fileName))
+ {
+ return new ValidationResult("File name contains invalid characters. Use only letters, numbers, dots, underscores, and hyphens");
+ }
+
+ if (ContainsDangerousPatterns(fileName))
+ {
+ return new ValidationResult("File name contains potentially dangerous content");
+ }
+
+ // Check for reserved Windows filenames
+ var reservedNames = new[]
+ {
+ "CON", "PRN", "AUX", "NUL", "COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7", "COM8", "COM9",
+ "LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", "LPT9"
+ };
+
+ var fileNameWithoutExtension = Path.GetFileNameWithoutExtension(fileName).ToUpperInvariant();
+ if (reservedNames.Contains(fileNameWithoutExtension))
+ {
+ return new ValidationResult("File name is a reserved system name");
+ }
+
+ return ValidationResult.Success!;
+ }
+ }
+
+ public static class DataValidation
+ {
+ public static ValidationResult ValidateDataContains(string dataContains)
+ {
+ if (string.IsNullOrWhiteSpace(dataContains))
+ {
+ return new ValidationResult("Data contains filter cannot be empty");
+ }
+
+ if (dataContains.Length > 1000)
+ {
+ return new ValidationResult("Data contains filter cannot exceed 1000 characters");
+ }
+
+ if (ContainsDangerousPatterns(dataContains))
+ {
+ return new ValidationResult("Data contains filter contains potentially dangerous content");
+ }
+
+ return ValidationResult.Success!;
+ }
+
+ public static ValidationResult ValidateOutputFile(string outputFile)
+ {
+ if (string.IsNullOrWhiteSpace(outputFile))
+ {
+ return new ValidationResult("Output file path cannot be empty");
+ }
+
+ return FileValidation.ValidateFilePath(outputFile);
+ }
+ }
+
+ private static bool ContainsDangerousPatterns(string input)
+ {
+ return DangerousPatterns.Any(pattern => pattern.IsMatch(input));
+ }
+
+ public static string SanitizeString(string input)
+ {
+ if (string.IsNullOrWhiteSpace(input))
+ {
+ return string.Empty;
+ }
+
+ // Remove control characters
+ var sanitized = Regex.Replace(input, @"[\x00-\x1F\x7F]", "");
+
+ // Trim whitespace
+ sanitized = sanitized.Trim();
+
+ return sanitized;
+ }
+
+ public static string SanitizeFileName(string fileName)
+ {
+ if (string.IsNullOrWhiteSpace(fileName))
+ {
+ return string.Empty;
+ }
+
+ // Remove invalid characters for file names
+ var sanitized = Regex.Replace(fileName, @"[<>:""/\\|?*\x00-\x1F]", "");
+
+ // Trim and limit length
+ sanitized = sanitized.Trim();
+ if (sanitized.Length > 255)
+ {
+ sanitized = sanitized.Substring(0, 255);
+ }
+
+ return sanitized;
+ }
+
+ public static string SanitizePath(string path)
+ {
+ if (string.IsNullOrWhiteSpace(path))
+ {
+ return string.Empty;
+ }
+
+ // Remove path traversal attempts
+ var sanitized = path.Replace("..", "").Replace("~", "");
+
+ // Remove invalid characters
+ sanitized = Regex.Replace(sanitized, @"[<>""|?\x00-\x1F]", "");
+
+ // Normalize path separators
+ sanitized = sanitized.Replace('\\', '/');
+
+ return sanitized;
+ }
+ }
+}
\ No newline at end of file
diff --git a/Program.cs b/Program.cs
new file mode 100644
index 0000000..e7ccb71
--- /dev/null
+++ b/Program.cs
@@ -0,0 +1,1087 @@
+using System;
+using System.IO;
+using System.Text.Json;
+using CommandLine;
+using Crayon;
+using DatabaseSnapshotsService.Models;
+using DatabaseSnapshotsService.Services;
+using Microsoft.Extensions.Configuration;
+using System.Threading.Tasks;
+using System.Text;
+using System.Linq;
+using System.Collections.Generic;
+using System.ComponentModel.DataAnnotations;
+using MySqlConnector;
+
+namespace DatabaseSnapshotsService
+{
+ class Program
+ {
+ static async Task Main(string[] args)
+ {
+ var parser = new Parser(settings =>
+ {
+ settings.CaseSensitive = false;
+ settings.HelpWriter = Console.Out;
+ });
+
+ var result = parser.ParseArguments(args);
+
+ return await result.MapResult(
+ (SnapshotOptions opts) => HandleSnapshotCommand(opts),
+ (RecoveryOptions opts) => HandleRecoveryCommand(opts),
+ (ConfigOptions opts) => HandleConfigCommand(opts),
+ (BinlogOptions opts) => HandleBinlogCommand(opts),
+ (EventsOptions opts) => HandleEventsCommand(opts),
+ (RestoreOptions opts) => HandleRestoreCommand(opts),
+ errors => Task.FromResult(1)
+ );
+ }
+
+ static async Task HandleSnapshotCommand(SnapshotOptions options)
+ {
+ try
+ {
+ // Validate and sanitize command input
+ var sanitizedCommand = InputValidation.SanitizeString(options.Command);
+ if (string.IsNullOrWhiteSpace(sanitizedCommand))
+ {
+ Output.Red("Error: Command cannot be empty");
+ return 1;
+ }
+
+ var config = LoadConfiguration(options.ConfigFile);
+ var snapshotService = new SnapshotService(config);
+
+ switch (sanitizedCommand.ToLower())
+ {
+ case "create":
+ // Validate snapshot name
+ var nameValidation = InputValidation.SnapshotValidation.ValidateSnapshotName(options.Name);
+ if (nameValidation != ValidationResult.Success)
+ {
+ Output.Red($"Error: {nameValidation.ErrorMessage}");
+ return 1;
+ }
+
+ // Validate snapshot type
+ var typeValidation = InputValidation.SnapshotValidation.ValidateSnapshotType(options.Type);
+ if (typeValidation != ValidationResult.Success)
+ {
+ Output.Red($"Error: {typeValidation.ErrorMessage}");
+ return 1;
+ }
+
+ if (options.Type.ToLower() == "incremental")
+ {
+ Output.Green($"Creating incremental snapshot: {options.Name}");
+ var snapshot = await snapshotService.CreateIncrementalSnapshotAsync(options.Name);
+ Output.Green($"Incremental snapshot created successfully: {snapshot.Id}");
+ Output.Yellow($"File: {snapshot.FilePath}");
+ Output.Yellow($"Size: {snapshot.DataSize} bytes");
+ }
+ else
+ {
+ Output.Green($"Creating snapshot: {options.Name}");
+ var snapshot = await snapshotService.CreateSnapshotAsync(options.Name);
+ Output.Green($"Snapshot created successfully: {snapshot.Id}");
+ Output.Yellow($"File: {snapshot.FilePath}");
+ Output.Yellow($"Size: {snapshot.DataSize} bytes");
+ }
+ break;
+
+ case "list":
+ var snapshots = await snapshotService.ListSnapshotsAsync();
+ Console.WriteLine("Available snapshots:");
+
+ // Group snapshots by type and sort by timestamp
+ var fullSnapshots = snapshots.Where(s => s.Type.Equals("Full", StringComparison.OrdinalIgnoreCase))
+ .OrderByDescending(s => s.CreatedAt)
+ .ToList();
+
+ var incrementalSnapshots = snapshots.Where(s => s.Type.Equals("Incremental", StringComparison.OrdinalIgnoreCase))
+ .OrderByDescending(s => s.CreatedAt)
+ .ToList();
+
+ // Display full snapshots first
+ foreach (var snapshot in fullSnapshots)
+ {
+ var timestamp = snapshot.CreatedAt.ToString("yyyy-MM-dd HH:mm:ss");
+ var size = snapshot.DataSize switch
+ {
+ < 1024 => $"{snapshot.DataSize} bytes",
+ < 1024 * 1024 => $"{snapshot.DataSize / 1024:N0} KB",
+ _ => $"{snapshot.DataSize / (1024 * 1024):N1} MB"
+ };
+
+ Console.WriteLine($" {snapshot.Id}: {snapshot.Description} ({timestamp})");
+ Console.WriteLine($" File: {Path.GetFileName(snapshot.FilePath)}");
+ Console.WriteLine($" Size: {size}, Status: {snapshot.Status}");
+
+ // Find all incremental snapshots that belong to this full snapshot (directly or indirectly)
+ var allRelatedIncrementals = GetAllRelatedIncrementals(snapshot.Id, incrementalSnapshots)
+ .OrderByDescending(inc => inc.CreatedAt)
+ .ToList();
+
+ foreach (var inc in allRelatedIncrementals)
+ {
+ var incTimestamp = inc.CreatedAt.ToString("yyyy-MM-dd HH:mm:ss");
+ var incSize = inc.DataSize switch
+ {
+ < 1024 => $"{inc.DataSize} bytes",
+ < 1024 * 1024 => $"{inc.DataSize / 1024:N0} KB",
+ _ => $"{inc.DataSize / (1024 * 1024):N1} MB"
+ };
+
+ Console.WriteLine($" └─ {inc.Id}: {inc.Description} ({incTimestamp})");
+ Console.WriteLine($" File: {Path.GetFileName(inc.FilePath)}");
+ Console.WriteLine($" Size: {incSize}, Status: {inc.Status}");
+ }
+
+ Console.WriteLine();
+ }
+ break;
+
+ case "show":
+ // Validate snapshot ID
+ if (!int.TryParse(options.Name, out var showId))
+ {
+ Output.Red("Error: Invalid snapshot ID. Must be a positive integer.");
+ return 1;
+ }
+
+ var showIdValidation = InputValidation.SnapshotValidation.ValidateSnapshotId(showId);
+ if (showIdValidation != ValidationResult.Success)
+ {
+ Output.Red($"Error: {showIdValidation.ErrorMessage}");
+ return 1;
+ }
+
+ Output.Green($"Snapshot details for: {options.Name}");
+ var snapshotDetails = await snapshotService.GetSnapshotAsync(showId);
+ if (snapshotDetails != null)
+ {
+ Output.White($" ID: {snapshotDetails.Id}");
+ Output.White($" Created: {snapshotDetails.CreatedAt:yyyy-MM-dd HH:mm:ss}");
+ Output.White($" File: {snapshotDetails.FilePath}");
+ Output.White($" Size: {snapshotDetails.DataSize:N0} bytes");
+ Output.White($" Status: {snapshotDetails.Status}");
+ }
+ else
+ {
+ Output.Red($"Snapshot '{options.Name}' not found");
+ return 1;
+ }
+ break;
+
+ case "delete":
+ // Validate snapshot ID
+ if (!int.TryParse(options.Name, out var deleteId))
+ {
+ Output.Red("Error: Invalid snapshot ID. Must be a positive integer.");
+ return 1;
+ }
+
+ var deleteIdValidation = InputValidation.SnapshotValidation.ValidateSnapshotId(deleteId);
+ if (deleteIdValidation != ValidationResult.Success)
+ {
+ Output.Red($"Error: {deleteIdValidation.ErrorMessage}");
+ return 1;
+ }
+
+ Output.Yellow($"Deleting snapshot: {options.Name}");
+ await snapshotService.DeleteSnapshotAsync(deleteId);
+ Output.Green("Snapshot deleted successfully");
+ break;
+
+ default:
+ Output.Red($"Unknown snapshot command: {sanitizedCommand}");
+ return 1;
+ }
+
+ return 0;
+ }
+ catch (Exception ex)
+ {
+ Output.Red($"Error: {ex.Message}");
+ return 1;
+ }
+ }
+
+ static async Task HandleRecoveryCommand(RecoveryOptions options)
+ {
+ try
+ {
+ // Validate and sanitize command input
+ var sanitizedCommand = InputValidation.SanitizeString(options.Command);
+ if (string.IsNullOrWhiteSpace(sanitizedCommand))
+ {
+ Output.Red("Error: Command cannot be empty");
+ return 1;
+ }
+
+ var config = LoadConfiguration(options.ConfigFile);
+ var recoveryService = new RecoveryService(config);
+
+ switch (sanitizedCommand)
+ {
+ case "create-point":
+ // Validate recovery point name
+ var nameValidation = InputValidation.RecoveryValidation.ValidateRecoveryPointName(options.Name);
+ if (nameValidation != ValidationResult.Success)
+ {
+ Output.Red($"Error: {nameValidation.ErrorMessage}");
+ return 1;
+ }
+
+ Output.Green($"Creating recovery point: {options.Name}");
+ var point = await recoveryService.CreateRecoveryPointAsync(options.Name);
+ Output.Green($"Recovery point created: {point.Id}");
+ Output.Yellow($"Name: {point.Name}");
+ Output.Yellow($"Timestamp: {point.CreatedAt:yyyy-MM-dd HH:mm:ss}");
+ break;
+
+ case "list-points":
+ Output.Green("Available recovery points:");
+ var points = await recoveryService.ListRecoveryPointsAsync();
+ foreach (var p in points)
+ {
+ Output.White($" {p.Id}: {p.Name} ({p.CreatedAt:yyyy-MM-dd HH:mm:ss})");
+ Output.Gray($" Event Count: {p.EventCount}");
+ }
+ break;
+
+ case "restore":
+ // Validate recovery point name
+ var restoreNameValidation = InputValidation.RecoveryValidation.ValidateRecoveryPointName(options.Name);
+ if (restoreNameValidation != ValidationResult.Success)
+ {
+ Output.Red($"Error: {restoreNameValidation.ErrorMessage}");
+ return 1;
+ }
+
+ Output.Yellow($"Restoring to recovery point: {options.Name}");
+ if (options.DryRun)
+ {
+ Output.Green("DRY RUN - No actual restore performed");
+ var plan = await recoveryService.PreviewRestoreAsync(DateTimeOffset.UtcNow.ToUnixTimeSeconds());
+ Output.White($" Target Timestamp: {plan.TargetTimestamp}");
+ Output.White($" Event Count: {plan.EventCount}");
+ Output.White($" Affected Tables: {string.Join(", ", plan.AffectedTables)}");
+ }
+ else
+ {
+ await recoveryService.RestoreAsync(DateTimeOffset.UtcNow.ToUnixTimeSeconds());
+ Output.Green("Restore completed successfully");
+ }
+ break;
+
+ default:
+ Output.Red($"Unknown recovery command: {sanitizedCommand}");
+ return 1;
+ }
+
+ return 0;
+ }
+ catch (Exception ex)
+ {
+ Output.Red($"Error: {ex.Message}");
+ return 1;
+ }
+ }
+
+ static async Task HandleConfigCommand(ConfigOptions options)
+ {
+ try
+ {
+ var config = LoadConfiguration(options.ConfigFile);
+ Output.Green("Current configuration:");
+ Output.White($" Binlog Host: {config.BinlogReader.Host}");
+ Output.White($" Binlog Port: {config.BinlogReader.Port}");
+ Output.White($" Binlog Username: {config.BinlogReader.Username}");
+ Output.White($" Snapshots Path: {config.SnapshotStorage.Path}");
+ Output.White($" Compression: {config.SnapshotStorage.Compression}");
+ Output.White($" Retention Days: {config.SnapshotStorage.RetentionDays}");
+ return 0;
+ }
+ catch (Exception ex)
+ {
+ Output.Red($"Error: {ex.Message}");
+ return 1;
+ }
+ }
+
+ static async Task HandleBinlogCommand(BinlogOptions options)
+ {
+ try
+ {
+ Console.WriteLine("Loading configuration...");
+ var config = LoadConfiguration(options.ConfigFile);
+ Console.WriteLine($"Configuration loaded. Binlog host: {config.BinlogReader.Host}:{config.BinlogReader.Port}");
+
+ var eventStore = new EventStore(config.EventStore);
+ var binlogReader = new BinlogReader(config.BinlogReader, eventStore);
+
+ // Set up event handlers
+ binlogReader.LogMessage += (sender, message) =>
+ {
+ if (options.Verbose)
+ Output.Gray($"[LOG] {message}");
+ else
+ Console.WriteLine($"[LOG] {message}");
+ };
+
+ binlogReader.EventReceived += (sender, evt) =>
+ {
+ // Only show individual events in verbose mode
+ if (options.Verbose)
+ {
+ var eventType = evt.EventType.ToString();
+ var timestamp = evt.Timestamp.ToString("HH:mm:ss.fff");
+ var position = evt.LogPosition;
+
+ Console.WriteLine($"[{timestamp}] {eventType} @ {position}");
+ Output.White($"[{timestamp}] {eventType} @ {position}");
+
+ if (evt.EventData != null)
+ {
+ var hexData = BitConverter.ToString(evt.EventData, 0, Math.Min(50, evt.EventData.Length));
+ var eventInfo = Encoding.UTF8.GetString(evt.EventData);
+ Console.WriteLine($" Info: {eventInfo}");
+ Output.Gray($" Info: {eventInfo}");
+ }
+ }
+ };
+
+ // Connect and start reading
+ Output.Green("Connecting to MySQL binlog...");
+ if (!await binlogReader.ConnectAsync())
+ {
+ Output.Red("Failed to connect to MySQL");
+ return 1;
+ }
+
+ Output.Green($"Starting binlog read from position {options.Position}");
+ if (!string.IsNullOrEmpty(options.BinlogFile))
+ {
+ Output.Yellow($"Using binlog file: {options.BinlogFile}");
+ }
+
+ // Start reading in background
+ var readTask = binlogReader.StartReadingAsync(options.BinlogFile, options.Position);
+
+ Output.Green("Binlog reading started. Press Ctrl+C to stop.");
+ Output.Yellow("Make some database changes to see events...");
+
+ // Wait for user to stop
+ Console.CancelKeyPress += (sender, e) =>
+ {
+ e.Cancel = true;
+ Output.Yellow("\nStopping binlog reader...");
+ binlogReader.StopReading();
+ };
+
+ await readTask;
+ binlogReader.Disconnect();
+
+ Output.Green("Binlog reading stopped");
+ return 0;
+ }
+ catch (Exception ex)
+ {
+ Output.Red($"Error: {ex.Message}");
+ Console.WriteLine($"Exception details: {ex}");
+ return 1;
+ }
+ }
+
+ static async Task HandleEventsCommand(EventsOptions options)
+ {
+ try
+ {
+ var config = LoadConfiguration(options.ConfigFile);
+
+ // Validate limit
+ var limitValidation = InputValidation.EventValidation.ValidateLimit(options.Limit);
+ if (limitValidation != ValidationResult.Success)
+ {
+ Output.Red($"Error: {limitValidation.ErrorMessage}");
+ return 1;
+ }
+
+ // Validate table name if provided
+ if (!string.IsNullOrEmpty(options.Table))
+ {
+ var tableValidation = InputValidation.EventValidation.ValidateTableName(options.Table);
+ if (tableValidation != ValidationResult.Success)
+ {
+ Output.Red($"Error: {tableValidation.ErrorMessage}");
+ return 1;
+ }
+ }
+
+ // Validate operation if provided
+ if (!string.IsNullOrEmpty(options.Operation))
+ {
+ var operationValidation = InputValidation.EventValidation.ValidateOperation(options.Operation);
+ if (operationValidation != ValidationResult.Success)
+ {
+ Output.Red($"Error: {operationValidation.ErrorMessage}");
+ return 1;
+ }
+ }
+
+ // Validate event type if provided
+ if (!string.IsNullOrEmpty(options.EventType))
+ {
+ var eventTypeValidation = InputValidation.EventValidation.ValidateEventType(options.EventType);
+ if (eventTypeValidation != ValidationResult.Success)
+ {
+ Output.Red($"Error: {eventTypeValidation.ErrorMessage}");
+ return 1;
+ }
+ }
+
+ // Validate data contains filter if provided
+ if (!string.IsNullOrEmpty(options.DataContains))
+ {
+ var dataContainsValidation = InputValidation.DataValidation.ValidateDataContains(options.DataContains);
+ if (dataContainsValidation != ValidationResult.Success)
+ {
+ Output.Red($"Error: {dataContainsValidation.ErrorMessage}");
+ return 1;
+ }
+ }
+
+ // Validate output file if provided
+ if (!string.IsNullOrEmpty(options.OutputFile))
+ {
+ var outputFileValidation = InputValidation.DataValidation.ValidateOutputFile(options.OutputFile);
+ if (outputFileValidation != ValidationResult.Success)
+ {
+ Output.Red($"Error: {outputFileValidation.ErrorMessage}");
+ return 1;
+ }
+ }
+
+ // Validate timestamps if provided
+ if (options.FromTimestamp > 0)
+ {
+ var fromTimestampValidation = InputValidation.EventValidation.ValidateTimestamp(options.FromTimestamp);
+ if (fromTimestampValidation != ValidationResult.Success)
+ {
+ Output.Red($"Error: {fromTimestampValidation.ErrorMessage}");
+ return 1;
+ }
+ }
+
+ if (options.ToTimestamp > 0)
+ {
+ var toTimestampValidation = InputValidation.EventValidation.ValidateTimestamp(options.ToTimestamp);
+ if (toTimestampValidation != ValidationResult.Success)
+ {
+ Output.Red($"Error: {toTimestampValidation.ErrorMessage}");
+ return 1;
+ }
+ }
+
+ // Validate snapshot ID
+ var snapshotIdValidation = InputValidation.SnapshotValidation.ValidateSnapshotId(options.SnapshotId);
+ if (snapshotIdValidation != ValidationResult.Success)
+ {
+ Output.Red($"Error: {snapshotIdValidation.ErrorMessage}");
+ return 1;
+ }
+
+ var snapshotService = new SnapshotService(config);
+ var snapshot = await snapshotService.GetSnapshotAsync(options.SnapshotId);
+
+ if (snapshot == null)
+ {
+ Output.Red($"Error: Snapshot {options.SnapshotId} not found");
+ return 1;
+ }
+
+ if (!snapshot.Type.Equals("Incremental", StringComparison.OrdinalIgnoreCase))
+ {
+ Output.Red($"Error: Snapshot {options.SnapshotId} is not an incremental snapshot");
+ return 1;
+ }
+
+ // Validate that output option is only used with sql-only
+ if (!string.IsNullOrEmpty(options.OutputFile) && (options.NoSqlOnly || !options.SqlOnly))
+ {
+ Output.Red("Error: --output option can only be used with --sql-only");
+ return 1;
+ }
+
+ // Determine if we should use SQL-only mode
+ var useSqlOnly = options.SqlOnly && !options.NoSqlOnly;
+
+ Output.Green($"Reading events from incremental snapshot {options.SnapshotId}: {snapshot.Description}");
+ Output.White($"File: {snapshot.FilePath}");
+ Output.White($"Created: {snapshot.CreatedAt:yyyy-MM-dd HH:mm:ss}");
+ Console.WriteLine();
+
+ // Create a temporary options object with the correct sql-only setting
+ var tempOptions = new EventsOptions
+ {
+ ConfigFile = options.ConfigFile,
+ SnapshotId = options.SnapshotId,
+ Table = options.Table,
+ Limit = options.Limit,
+ FromTimestamp = options.FromTimestamp,
+ ToTimestamp = options.ToTimestamp,
+ CountOnly = options.CountOnly,
+ Operation = options.Operation,
+ EventType = options.EventType,
+ DataContains = options.DataContains,
+ SqlOnly = useSqlOnly,
+ OutputFile = options.OutputFile
+ };
+
+ await ReadEventsFromIncrementalSnapshot(snapshot, tempOptions);
+ return 0;
+ }
+ catch (Exception ex)
+ {
+ Output.Red($"Error: {ex.Message}");
+ if (ex.InnerException != null)
+ {
+ Output.Red($"Inner error: {ex.InnerException.Message}");
+ }
+ return 1;
+ }
+ }
+
+ static async Task HandleRestoreCommand(RestoreOptions options)
+ {
+ try
+ {
+ var config = LoadConfiguration(options.ConfigFile);
+ var snapshotService = new SnapshotService(config);
+ var recoveryService = new RecoveryService(config);
+
+ // Validate snapshot exists
+ var snapshot = await snapshotService.GetSnapshotAsync(options.FromSnapshot);
+ if (snapshot == null)
+ {
+ Console.WriteLine($"Error: Snapshot {options.FromSnapshot} not found");
+ return 1;
+ }
+
+ Console.WriteLine($"Starting restore from snapshot {options.FromSnapshot}");
+ Console.WriteLine($"Snapshot: {snapshot.Description} created at {snapshot.CreatedAt:yyyy-MM-dd HH:mm:ss}");
+
+ if (options.DryRun)
+ {
+ Console.WriteLine("=== DRY RUN MODE ===");
+ Console.WriteLine($"Would restore database from snapshot {options.FromSnapshot}");
+ Console.WriteLine("No changes would be made to the database");
+ return 0;
+ }
+
+ // Perform the actual restore
+ await recoveryService.RestoreAsync(snapshot.Timestamp);
+
+ Console.WriteLine("Recovery completed successfully!");
+ return 0;
+ }
+ catch (Exception ex)
+ {
+ Console.WriteLine($"Error: {ex.Message}");
+ Console.WriteLine($"Exception details: {ex}");
+ return 1;
+ }
+ }
+
+ static SnapshotConfiguration LoadConfiguration(string configFile)
+ {
+ // Validate and sanitize the config file path
+ var sanitizedConfigFile = InputValidation.SanitizePath(configFile ?? string.Empty);
+ var configPath = string.IsNullOrEmpty(sanitizedConfigFile) ? "appsettings.json" : sanitizedConfigFile;
+
+ // Validate file path
+ var pathValidation = InputValidation.FileValidation.ValidateFilePath(configPath);
+ if (pathValidation != ValidationResult.Success)
+ {
+ throw new ArgumentException($"Invalid configuration file path: {pathValidation.ErrorMessage}");
+ }
+
+ if (!File.Exists(configPath))
+ {
+ throw new FileNotFoundException($"Configuration file not found: {configPath}");
+ }
+
+ try
+ {
+ var configuration = new ConfigurationBuilder()
+ .SetBasePath(Directory.GetCurrentDirectory())
+ .AddJsonFile(configPath, optional: false)
+ .Build();
+
+ var config = new SnapshotConfiguration();
+ configuration.Bind(config);
+
+ // Validate the loaded configuration
+ var validationResult = ConfigurationValidation.ValidateConfiguration(config);
+ if (validationResult.Count > 0)
+ {
+ var errorMessages = string.Join("\n", validationResult.Select(e => e.ErrorMessage));
+ throw new InvalidOperationException($"Configuration validation failed:\n{errorMessages}");
+ }
+
+ return config;
+ }
+ catch (Exception ex) when (ex is not InvalidOperationException)
+ {
+ throw new InvalidOperationException($"Failed to load configuration from {configPath}: {ex.Message}", ex);
+ }
+ }
+
+ static List GetAllRelatedIncrementals(int snapshotId, List incrementalSnapshots)
+ {
+ var relatedIncrementals = new List();
+ var queue = new Queue(new[] { snapshotId });
+ var visited = new HashSet();
+
+ while (queue.Count > 0)
+ {
+ var currentId = queue.Dequeue();
+ if (visited.Contains(currentId)) continue;
+ visited.Add(currentId);
+
+ // Find all incremental snapshots that have this snapshot as their parent
+ var children = incrementalSnapshots.Where(s => s.ParentSnapshotId == currentId).ToList();
+ foreach (var child in children)
+ {
+ if (!visited.Contains(child.Id))
+ {
+ relatedIncrementals.Add(child);
+ queue.Enqueue(child.Id);
+ }
+ }
+ }
+
+ return relatedIncrementals;
+ }
+
        /// <summary>
        /// Reads, filters and prints the events contained in one incremental
        /// snapshot file. Errors are reported to the console rather than thrown.
        /// </summary>
        /// <param name="snapshot">Metadata of the incremental snapshot to read.</param>
        /// <param name="options">Parsed CLI filters (table, operation, timestamps, limit, output file).</param>
        static async Task ReadEventsFromIncrementalSnapshot(SnapshotInfo snapshot, EventsOptions options)
        {
            try
            {
                if (!File.Exists(snapshot.FilePath))
                {
                    Console.WriteLine($"Error: Snapshot file not found: {snapshot.FilePath}");
                    return;
                }

                // Read the file, transparently decrypting/decompressing as needed.
                var content = await ReadSnapshotFileWithDecryptionAsync(snapshot.FilePath, options.ConfigFile);
                var lines = content.Split('\n');

                // Split the raw text into individual event blocks.
                var events = ParseIncrementalSnapshotOutput(lines, options);

                // NOTE(review): this count is taken BEFORE the table/operation/
                // timestamp filters below are applied — confirm that is intended.
                if (options.CountOnly)
                {
                    Console.WriteLine($"Total events in snapshot: {events.Count}");
                    return;
                }

                // Each filter is a simple substring match on the event text.
                if (!string.IsNullOrEmpty(options.Table))
                {
                    events = events.Where(e => e.Contains($"### {options.Table}") || e.Contains($"`{options.Table}`")).ToList();
                }

                if (!string.IsNullOrEmpty(options.Operation))
                {
                    events = events.Where(e => e.Contains($"### {options.Operation}") || e.Contains($"{options.Operation.ToUpper()} ")).ToList();
                }

                if (!string.IsNullOrEmpty(options.EventType))
                {
                    events = events.Where(e => e.Contains($"### {options.EventType}") || e.Contains($"{options.EventType.ToUpper()} ")).ToList();
                }

                if (!string.IsNullOrEmpty(options.DataContains))
                {
                    events = events.Where(e => e.Contains(options.DataContains, StringComparison.OrdinalIgnoreCase)).ToList();
                }

                // Timestamp filters: events without a parseable "#<unix-ts>" marker
                // are kept rather than dropped.
                if (options.FromTimestamp > 0 || options.ToTimestamp > 0)
                {
                    events = events.Where(e =>
                    {
                        // Extract a 10-13 digit unix timestamp from the event, if present.
                        var timestampMatch = System.Text.RegularExpressions.Regex.Match(e, @"#(\d{10,13})");
                        if (timestampMatch.Success && long.TryParse(timestampMatch.Groups[1].Value, out var eventTimestamp))
                        {
                            if (options.FromTimestamp > 0 && eventTimestamp < options.FromTimestamp) return false;
                            if (options.ToTimestamp > 0 && eventTimestamp > options.ToTimestamp) return false;
                        }
                        return true;
                    }).ToList();
                }

                // Cap the result set after all filters have run.
                events = events.Take(options.Limit).ToList();

                // Optionally persist the (SQL-only) result before printing it.
                if (!string.IsNullOrEmpty(options.OutputFile))
                {
                    try
                    {
                        var sqlContent = string.Join("\n\n", events);
                        await File.WriteAllTextAsync(options.OutputFile, sqlContent);
                        Console.WriteLine($"SQL queries saved to: {options.OutputFile}");
                    }
                    catch (Exception ex)
                    {
                        // A failed save is reported but does not abort the listing below.
                        Console.WriteLine($"Error saving to file: {ex.Message}");
                    }
                }

                Console.WriteLine($"Found {events.Count} events in snapshot:");
                Console.WriteLine();

                foreach (var evt in events)
                {
                    Console.WriteLine(evt);
                    Console.WriteLine();
                }
            }
            catch (Exception ex)
            {
                Console.WriteLine($"Error reading snapshot events: {ex.Message}");
            }
        }
+
+ static async Task ReadSnapshotFileWithDecryptionAsync(string filePath, string configFile)
+ {
+ try
+ {
+ var config = LoadConfiguration(configFile);
+ var fileService = new OptimizedFileService();
+
+ // Initialize encryption service - match RecoveryService pattern
+ var encryptionService = new EncryptionService(
+ config.Security.EncryptionKey,
+ config.Security.Encryption
+ );
+
+ // Check if file is encrypted and compressed
+ if (filePath.EndsWith(".lz4.enc"))
+ {
+ // First decrypt, then decompress
+ var decryptedPath = filePath.Replace(".lz4.enc", ".lz4.tmp");
+ var decompressedPath = filePath.Replace(".lz4.enc", ".sql.tmp");
+
+ try
+ {
+ // Decrypt the file
+ await encryptionService.DecryptFileAsync(filePath, decryptedPath);
+
+ // Decompress the decrypted file
+ await fileService.DecompressFileStreamingAsync(decryptedPath, decompressedPath);
+
+ // Read the final SQL content
+ var content = await fileService.ReadFileOptimizedAsync(decompressedPath);
+ return Encoding.UTF8.GetString(content);
+ }
+ finally
+ {
+ // Clean up temporary files
+ if (File.Exists(decryptedPath)) File.Delete(decryptedPath);
+ if (File.Exists(decompressedPath)) File.Delete(decompressedPath);
+ }
+ }
+ else if (filePath.EndsWith(".lz4"))
+ {
+ // Only compressed, not encrypted
+ var tempPath = filePath.Replace(".lz4", ".tmp");
+ await fileService.DecompressFileStreamingAsync(filePath, tempPath);
+
+ var content = await fileService.ReadFileOptimizedAsync(tempPath);
+ File.Delete(tempPath); // Clean up temp file
+ return Encoding.UTF8.GetString(content);
+ }
+ else if (filePath.EndsWith(".enc"))
+ {
+ // Only encrypted, not compressed
+ var tempPath = filePath.Replace(".enc", ".tmp");
+ await encryptionService.DecryptFileAsync(filePath, tempPath);
+
+ var content = await fileService.ReadFileOptimizedAsync(tempPath);
+ File.Delete(tempPath); // Clean up temp file
+ return Encoding.UTF8.GetString(content);
+ }
+ else
+ {
+ // Plain text file
+ var content = await fileService.ReadFileOptimizedAsync(filePath);
+ return Encoding.UTF8.GetString(content);
+ }
+ }
+ catch (Exception ex)
+ {
+ throw new InvalidOperationException($"Failed to read snapshot file {filePath}: {ex.Message}", ex);
+ }
+ }
+
+ static List ParseIncrementalSnapshotOutput(string[] lines, EventsOptions options)
+ {
+ if (options.SqlOnly)
+ {
+ return ExtractSqlQueries(lines);
+ }
+
+ var events = new List();
+ var currentEvent = new List();
+ var inEvent = false;
+
+ foreach (var line in lines)
+ {
+ // Start of a new event
+ if (line.StartsWith("# at ") || line.StartsWith("#") && line.Contains("server id"))
+ {
+ if (inEvent && currentEvent.Count > 0)
+ {
+ events.Add(string.Join("\n", currentEvent));
+ currentEvent.Clear();
+ }
+ inEvent = true;
+ currentEvent.Add(line);
+ }
+ // SQL statements and data
+ else if (inEvent && (line.StartsWith("### ") || line.StartsWith("SET ") || line.StartsWith("INSERT ") ||
+ line.StartsWith("UPDATE ") || line.StartsWith("DELETE ") || line.StartsWith("CREATE ") ||
+ line.StartsWith("ALTER ") || line.StartsWith("DROP ")))
+ {
+ currentEvent.Add(line);
+ }
+ // End of event (empty line or new section)
+ else if (inEvent && string.IsNullOrWhiteSpace(line))
+ {
+ if (currentEvent.Count > 0)
+ {
+ events.Add(string.Join("\n", currentEvent));
+ currentEvent.Clear();
+ }
+ inEvent = false;
+ }
+ // Continue adding lines to current event
+ else if (inEvent)
+ {
+ currentEvent.Add(line);
+ }
+ }
+
+ // Add the last event if any
+ if (inEvent && currentEvent.Count > 0)
+ {
+ events.Add(string.Join("\n", currentEvent));
+ }
+
+ return events;
+ }
+
+ static List ExtractSqlQueries(string[] lines)
+ {
+ var sqlQueries = new List();
+ var currentQuery = new List();
+ var inQuery = false;
+
+ foreach (var line in lines)
+ {
+ // Look for SQL query annotations
+ if (line.StartsWith("#Q> "))
+ {
+ if (inQuery && currentQuery.Count > 0)
+ {
+ sqlQueries.Add(string.Join("\n", currentQuery));
+ currentQuery.Clear();
+ }
+ inQuery = true;
+ currentQuery.Add(line.Substring(4)); // Remove "#Q> " prefix
+ }
+ // Look for direct SQL statements
+ else if (line.StartsWith("INSERT ") || line.StartsWith("UPDATE ") ||
+ line.StartsWith("DELETE ") || line.StartsWith("CREATE ") ||
+ line.StartsWith("ALTER ") || line.StartsWith("DROP ") ||
+ line.StartsWith("SET ") || line.StartsWith("START ") ||
+ line.StartsWith("COMMIT") || line.StartsWith("ROLLBACK"))
+ {
+ if (inQuery && currentQuery.Count > 0)
+ {
+ sqlQueries.Add(string.Join("\n", currentQuery));
+ currentQuery.Clear();
+ }
+ inQuery = true;
+ currentQuery.Add(line);
+ }
+ // Look for SET statements that are part of transactions
+ else if (line.StartsWith("/*M!") && line.Contains("SET"))
+ {
+ // Extract the SET statement from MariaDB-specific comments
+ var setMatch = System.Text.RegularExpressions.Regex.Match(line, @"SET[^;]+;");
+ if (setMatch.Success)
+ {
+ if (inQuery && currentQuery.Count > 0)
+ {
+ sqlQueries.Add(string.Join("\n", currentQuery));
+ currentQuery.Clear();
+ }
+ inQuery = true;
+ currentQuery.Add(setMatch.Value);
+ }
+ }
+ // End of query (empty line or new section)
+ else if (inQuery && (string.IsNullOrWhiteSpace(line) || line.StartsWith("# at ")))
+ {
+ if (currentQuery.Count > 0)
+ {
+ sqlQueries.Add(string.Join("\n", currentQuery));
+ currentQuery.Clear();
+ }
+ inQuery = false;
+ }
+ // Continue adding lines to current query if we're in a multi-line statement
+ else if (inQuery && !line.StartsWith("#") && !string.IsNullOrWhiteSpace(line))
+ {
+ currentQuery.Add(line);
+ }
+ }
+
+ // Add the last query if any
+ if (inQuery && currentQuery.Count > 0)
+ {
+ sqlQueries.Add(string.Join("\n", currentQuery));
+ }
+
+ return sqlQueries.Where(q => !string.IsNullOrWhiteSpace(q)).ToList();
+ }
+ }
+
    /// <summary>Command-line options for the "snapshot" verb.</summary>
    [Verb("snapshot", HelpText = "Manage database snapshots")]
    class SnapshotOptions
    {
        /// <summary>Sub-command to run: create, list, show or delete.</summary>
        [Option('c', "command", Required = true, HelpText = "Command: create, list, show, delete")]
        public string Command { get; set; }

        /// <summary>Snapshot name for create; a numeric snapshot id for show/delete.</summary>
        [Option('n', "name", HelpText = "Snapshot name")]
        public string Name { get; set; }

        /// <summary>Optional configuration file path; "appsettings.json" is used when omitted.</summary>
        [Option('f', "config", HelpText = "Configuration file path")]
        public string ConfigFile { get; set; }

        /// <summary>Snapshot type: "full" (default) or "incremental".</summary>
        [Option("type", HelpText = "Snapshot type: full or incremental", Default = "full")]
        public string Type { get; set; }
    }
+
    /// <summary>Command-line options for the "recovery" verb.</summary>
    [Verb("recovery", HelpText = "Manage recovery points and restore operations")]
    class RecoveryOptions
    {
        /// <summary>Sub-command to run: create-point, list-points or restore.</summary>
        [Option('c', "command", Required = true, HelpText = "Command: create-point, list-points, restore")]
        public string Command { get; set; }

        /// <summary>Recovery point name (used by create-point and restore).</summary>
        [Option('n', "name", HelpText = "Recovery point name")]
        public string Name { get; set; }

        /// <summary>Optional configuration file path; "appsettings.json" is used when omitted.</summary>
        [Option('f', "config", HelpText = "Configuration file path")]
        public string ConfigFile { get; set; }

        /// <summary>When set, restore only previews the plan without applying changes.</summary>
        [Option('d', "dry-run", HelpText = "Perform dry run for restore operations")]
        public bool DryRun { get; set; }
    }
+
    /// <summary>Command-line options for the "config" verb (prints the loaded configuration).</summary>
    [Verb("config", HelpText = "Show current configuration")]
    class ConfigOptions
    {
        /// <summary>Optional configuration file path; "appsettings.json" is used when omitted.</summary>
        [Option('f', "config", HelpText = "Configuration file path")]
        public string ConfigFile { get; set; }
    }
+
    /// <summary>Command-line options for the "binlog" verb (live binlog streaming).</summary>
    [Verb("binlog", HelpText = "Read MySQL binlog events in real-time")]
    class BinlogOptions
    {
        /// <summary>Optional configuration file path; "appsettings.json" is used when omitted.</summary>
        [Option('f', "config", HelpText = "Configuration file path")]
        public string ConfigFile { get; set; }

        /// <summary>Specific binlog file to read; the server's current file is used when omitted.</summary>
        [Option('b', "binlog-file", HelpText = "Binlog file name (optional)")]
        public string BinlogFile { get; set; }

        /// <summary>Byte offset to start reading from; 4 skips the binlog magic header.</summary>
        [Option('p', "position", Default = 4L, HelpText = "Starting position in binlog")]
        public long Position { get; set; }

        /// <summary>When set, every received event is echoed to the console.</summary>
        [Option('v', "verbose", HelpText = "Verbose output")]
        public bool Verbose { get; set; }
    }
+
+ [Verb("events", HelpText = "Query events from incremental snapshots")]
+ public class EventsOptions
+ {
+ [Option('f', "config", Required = true, HelpText = "Configuration file path")]
+ public string ConfigFile { get; set; } = string.Empty;
+
+ [Option('s', "snapshot", Required = true, HelpText = "Incremental snapshot ID to read events from")]
+ public int SnapshotId { get; set; }
+
+ [Option('t', "table", HelpText = "Filter by table name")]
+ public string? Table { get; set; }
+
+ [Option('l', "limit", Default = 100, HelpText = "Maximum number of events to return")]
+ public int Limit { get; set; } = 100;
+
+ [Option("from", HelpText = "Filter events from timestamp")]
+ public long FromTimestamp { get; set; }
+
+ [Option("to", HelpText = "Filter events to timestamp")]
+ public long ToTimestamp { get; set; }
+
+ [Option('c', "count", HelpText = "Show only count of events")]
+ public bool CountOnly { get; set; } = false;
+
+ [Option('o', "operation", HelpText = "Filter by operation type (insert, update, delete, query, etc.)")]
+ public string? Operation { get; set; }
+
+ [Option('e', "event-type", HelpText = "Filter by event type (binlog, status, etc.)")]
+ public string? EventType { get; set; }
+
+ [Option('k', "data-contains", HelpText = "Filter by keyword in event data (case-insensitive)")]
+ public string? DataContains { get; set; }
+
+ [Option('q', "sql-only", HelpText = "Extract and display only SQL queries from binlog events", Default = true)]
+ public bool SqlOnly { get; set; } = true;
+
+ [Option("no-sql-only", HelpText = "Disable SQL-only mode and show full binlog events")]
+ public bool NoSqlOnly { get; set; } = false;
+
+ [Option('o', "output", HelpText = "Save extracted SQL queries to specified file (only available with --sql-only)")]
+ public string? OutputFile { get; set; }
+ }
+
    /// <summary>Command-line options for the "restore" verb.</summary>
    [Verb("restore", HelpText = "Restore database from snapshot")]
    public class RestoreOptions
    {
        /// <summary>Configuration file path (required for this verb).</summary>
        [Option('f', "config", Required = true, HelpText = "Configuration file path")]
        public string ConfigFile { get; set; }

        /// <summary>Id of the snapshot to restore from.</summary>
        [Option("from-snapshot", Required = true, HelpText = "Source snapshot ID")]
        public int FromSnapshot { get; set; }

        /// <summary>When set, only prints what the restore would do.</summary>
        [Option("dry-run", HelpText = "Preview recovery without performing it")]
        public bool DryRun { get; set; }
    }
+
+ static class Output
+ {
+ public static void Green(string s) => Console.WriteLine(Crayon.Output.Green(s));
+ public static void Red(string s) => Console.WriteLine(Crayon.Output.Red(s));
+ public static void Yellow(string s) => Console.WriteLine(Crayon.Output.Yellow(s));
+ public static void White(string s) => Console.WriteLine(Crayon.Output.White(s));
+ public static void Gray(string s) => Console.WriteLine(Crayon.Output.Dim(s));
+ }
+}
\ No newline at end of file
diff --git a/README.md b/README.md
index e69de29..236c480 100644
--- a/README.md
+++ b/README.md
@@ -0,0 +1,297 @@
+# Database Snapshots Service
+
+A command-line utility for creating, managing, and restoring database snapshots for MySQL. It provides features for full and incremental backups, historical binlog event reading, and point-in-time recovery.
+
+## Features
+
+* **Full and Incremental Snapshots**: Create full database backups or smaller, incremental snapshots that capture changes since the last backup.
+* **Historical Binlog Reading**: Read and process historical binlog events from MySQL for auditing and analysis.
+* **Point-in-Time Recovery**: Restore the database to a specific state using a combination of full snapshots and incremental event data.
+* **File-Based Event Store**: All database events are captured and stored locally in a file-based event store for auditing and recovery.
+* **Optimized Performance**: Includes optimizations for file handling, such as streaming for large files and parallel processing where appropriate.
+* **Security**: Supports optional AES-256-CBC encryption for snapshot files to protect sensitive data.
+* **Compression**: Supports optional LZ4 compression to reduce the storage footprint of snapshots.
+* **Rich CLI**: A comprehensive command-line interface for interacting with the service.
+
+## Prerequisites
+
+- .NET 9.0 SDK or later (the project targets `net9.0`)
+- MySQL 5.7+ or MariaDB 10.2+ with binlog enabled
+- `mysqldump` and `mysqlbinlog` utilities installed
+- Appropriate MySQL user permissions for:
+ - `SELECT`, `SHOW`, `RELOAD`, `LOCK TABLES`, `REPLICATION CLIENT` (for binlog reading)
+ - `SELECT`, `SHOW`, `RELOAD`, `LOCK TABLES` (for snapshots)
+
+## Installation
+
+1. Clone the repository:
+ ```sh
+   git clone <repository-url>
+ cd DatabaseSnapshotsService
+ ```
+
+2. Build the project:
+ ```sh
+ dotnet build
+ ```
+
+3. Create a configuration file:
+ ```sh
+ cp config.example.json config.json
+ ```
+
+4. Edit `config.json` with your database settings (see Configuration section below).
+
+## Project Structure
+
+```
+DatabaseSnapshotsService/
+├── Program.cs # Main CLI entry point
+├── DatabaseSnapshots.csproj       # Project file
+├── Models/
+│ ├── Configuration.cs # Configuration classes
+│ ├── ConfigurationValidation.cs # Configuration validation
+│ ├── DataModels.cs # Data transfer objects
+│ └── InputValidation.cs # Input validation logic
+├── Services/
+│ ├── BinlogReader.cs # MySQL binlog reading service
+│ ├── EncryptionService.cs # File encryption/decryption
+│ ├── EventStore.cs # Event storage service
+│ ├── OptimizedFileService.cs # Optimized file operations
+│ ├── RecoveryService.cs # Database recovery service
+│ └── SnapshotService.cs # Snapshot creation and management
+└── README.md # This file
+```
+
+## Configuration
+
+The service is configured via a `config.json` file in the root directory. Here's a complete example with all available options:
+
+```json
+{
+ "connectionString": "Server=localhost;Database=trading;Uid=root;Pwd=password;",
+ "binlogReader": {
+ "host": "localhost",
+ "port": 3306,
+ "username": "binlog_reader",
+ "password": "secure_password",
+ "serverId": 999,
+ "startPosition": 4,
+ "heartbeatInterval": 30
+ },
+ "snapshotStorage": {
+ "path": "./snapshots",
+ "compression": true,
+ "retentionDays": 30,
+ "maxFileSize": 104857600,
+ "dumpOptimizations": {
+ "singleTransaction": true,
+ "includeRoutines": true,
+ "includeTriggers": true,
+ "includeEvents": true,
+ "extendedInsert": true,
+ "completeInsert": true,
+ "hexBlob": true,
+ "netBufferLength": 16384,
+ "maxAllowedPacket": "1G",
+ "excludeTables": [],
+ "includeTables": [],
+ "quick": true,
+ "orderByPrimary": true,
+ "flushLogs": true,
+ "masterData": 2,
+ "compact": false,
+ "noAutocommit": false,
+ "lockTables": false
+ }
+ },
+ "eventStore": {
+ "path": "./events",
+ "maxFileSize": 52428800,
+ "retentionDays": 90,
+ "batchSize": 1000,
+ "flushInterval": 5
+ },
+ "security": {
+ "encryption": false,
+ "encryptionKey": null
+ }
+}
+```
+
+### Configuration Options
+
+#### Root Level
+- **connectionString** (string, required): MySQL connection string for the target database.
+
+#### binlogReader
+- **host** (string, default: "localhost"): MySQL server hostname
+- **port** (int, default: 3306): MySQL server port
+- **username** (string, default: "binlog_reader"): Username for binlog reader
+- **password** (string, default: "secure_password"): Password for binlog reader
+- **serverId** (int, default: 999): Server ID for binlog reader (must be unique)
+- **startPosition** (long, default: 4): Starting position in binlog
+- **heartbeatInterval** (int, default: 30): Interval in seconds to send heartbeats
+
+#### snapshotStorage
+- **path** (string, default: "./snapshots"): Directory where snapshots are stored
+- **compression** (bool, default: true): Enable LZ4 compression for snapshots
+- **retentionDays** (int, default: 30): Number of days to retain snapshots
+- **maxFileSize** (long, default: 100MB): Maximum file size for snapshots in bytes
+
+#### snapshotStorage.dumpOptimizations
+- **singleTransaction** (bool, default: true): Use --single-transaction for consistent backups
+- **includeRoutines** (bool, default: true): Include stored procedures and functions
+- **includeTriggers** (bool, default: true): Include triggers
+- **includeEvents** (bool, default: true): Include events
+- **extendedInsert** (bool, default: true): Use extended INSERT syntax
+- **completeInsert** (bool, default: true): Use complete INSERT syntax
+- **hexBlob** (bool, default: true): Dump binary columns in hexadecimal
+- **netBufferLength** (int, default: 16384): TCP/IP buffer length
+- **maxAllowedPacket** (string, default: "1G"): Maximum allowed packet size
+- **excludeTables** (string[], default: []): Tables to exclude from backup
+- **includeTables** (string[], default: []): Tables to include in backup
+- **quick** (bool, default: true): Use --quick option for faster dumps
+- **orderByPrimary** (bool, default: true): Order by primary key
+- **flushLogs** (bool, default: true): Flush logs before dump
+- **masterData** (int, default: 2): --master-data option value
+- **compact** (bool, default: false): Use compact dump format
+- **noAutocommit** (bool, default: false): Disable autocommit
+- **lockTables** (bool, default: false): Lock all tables before dump
+
+#### eventStore
+- **path** (string, default: "./events"): Directory where events are stored
+- **maxFileSize** (long, default: 50MB): Maximum file size for event files
+- **retentionDays** (int, default: 90): Number of days to retain events
+- **batchSize** (int, default: 1000): Number of events to batch before writing
+- **flushInterval** (int, default: 5): Interval in seconds to flush events
+
+#### security
+- **encryption** (bool, default: false): Enable AES-256-CBC encryption
+- **encryptionKey** (string, optional): Base64-encoded encryption key (required if encryption is enabled)
+
+## Usage
+
+The service is used via the command line.
+
+### Snapshots
+
+Manage database snapshots.
+
+* **Create a full snapshot:**
+ ```sh
+ dotnet run -- snapshot -c create -n "MyFullSnapshot" --type "full"
+ ```
+
+* **Create an incremental snapshot:**
+ ```sh
+ dotnet run -- snapshot -c create -n "MyIncrementalSnapshot" --type "incremental"
+ ```
+
+* **List all snapshots:**
+ ```sh
+ dotnet run -- snapshot -c list
+ ```
+
+* **Show details for a specific snapshot:**
+ ```sh
+   dotnet run -- snapshot -c show -n <snapshot-name>
+ ```
+
+* **Delete a snapshot:**
+ ```sh
+   dotnet run -- snapshot -c delete -n <snapshot-name>
+ ```
+
+### Recovery
+
+Manage recovery points.
+
+* **Create a recovery point:**
+ ```sh
+ dotnet run -- recovery -c create-point -n "MyRecoveryPoint"
+ ```
+
+* **List all recovery points:**
+ ```sh
+ dotnet run -- recovery -c list-points
+ ```
+
+### Restore
+
+Restore the database from a snapshot.
+
+* **Restore from a snapshot:**
+ ```sh
+   dotnet run -- restore --from-snapshot <snapshot-id>
+ ```
+
+* **Perform a dry run of a restore:**
+ ```sh
+   dotnet run -- restore --from-snapshot <snapshot-id> --dry-run
+ ```
+
+### Events
+
+Query events from incremental snapshots.
+
+* **Query events from a snapshot:**
+ ```sh
+   dotnet run -- events -s <snapshot-id> --table "my_table" -l 50
+ ```
+
+* **Filter events by operation type:**
+ ```sh
+   dotnet run -- events -s <snapshot-id> --operation "insert"
+ ```
+
+### Binlog
+
+Read historical binlog events.
+
+* **Read historical binlog events:**
+ ```sh
+ dotnet run -- binlog
+ ```
+
+### Configuration
+
+Display the current configuration.
+
+* **Show current configuration:**
+ ```sh
+ dotnet run -- config
+ ```
+
+## Troubleshooting
+
+### Common Issues
+
+1. **Connection Failed**: Ensure MySQL is running and the connection string is correct
+2. **Permission Denied**: Verify the MySQL user has the required permissions
+3. **Binlog Not Found**: Ensure binlog is enabled in MySQL configuration
+4. **Encryption Errors**: Check that the encryption key is valid Base64 and at least 32 characters
+5. **File System Errors**: Ensure the service has write permissions to the configured paths
+
+### Logs
+
+The service outputs colored console messages:
+- **Green**: Success messages
+- **Yellow**: Warnings and informational messages
+- **Red**: Error messages
+- **White**: General output
+- **Gray**: Debug information
+
+### Performance Tips
+
+- Use `quick` and `orderByPrimary` options for faster dumps
+- Enable compression for large databases
+- Adjust `batchSize` and `flushInterval` for optimal event store performance
+
+## Security Considerations
+
+- Store encryption keys securely and never commit them to version control
+- Use strong, unique encryption keys (generate with `EncryptionService.GenerateEncryptionKey()`)
+- Ensure proper file system permissions for snapshot and event storage
+- Consider network security for remote MySQL connections
+- Regularly rotate encryption keys and update passwords
\ No newline at end of file
diff --git a/Services/BinlogReader.cs b/Services/BinlogReader.cs
new file mode 100644
index 0000000..be23115
--- /dev/null
+++ b/Services/BinlogReader.cs
@@ -0,0 +1,345 @@
+using System;
+using System.Collections.Generic;
+using System.IO;
+using System.Text;
+using System.Threading;
+using System.Threading.Tasks;
+using MySqlConnector;
+using DatabaseSnapshotsService.Models;
+
+namespace DatabaseSnapshotsService.Services
+{
+ public class BinlogReader
+ {
+ private readonly BinlogReaderConfig _config;
+ private readonly EventStore _eventStore;
+ private MySqlConnection _connection;
+ private bool _isConnected;
+ private bool _isReading;
+ private CancellationTokenSource _cancellationTokenSource;
+
+ public event EventHandler EventReceived;
+ public event EventHandler LogMessage;
+
+ public BinlogReader(BinlogReaderConfig config, EventStore eventStore)
+ {
+ _config = config;
+ _eventStore = eventStore;
+ }
+
+ public async Task ConnectAsync()
+ {
+ try
+ {
+ LogMessage?.Invoke(this, $"Attempting to connect to {_config.Host}:{_config.Port}");
+
+ var connectionString = $"Server={_config.Host};Port={_config.Port};User ID={_config.Username};Password={_config.Password};";
+ _connection = new MySqlConnection(connectionString);
+
+ await _connection.OpenAsync();
+ _isConnected = true;
+
+ LogMessage?.Invoke(this, $"Connected to MySQL at {_config.Host}:{_config.Port}");
+ return true;
+ }
+ catch (Exception ex)
+ {
+ LogMessage?.Invoke(this, $"Connection failed: {ex.Message}");
+ return false;
+ }
+ }
+
+ public async Task StartReadingAsync(string binlogFile = null, long position = 4)
+ {
+ if (!_isConnected)
+ {
+ throw new InvalidOperationException("Not connected to MySQL");
+ }
+
+ _cancellationTokenSource = new CancellationTokenSource();
+ _isReading = true;
+
+ try
+ {
+ LogMessage?.Invoke(this, $"Starting binlog read from position {position}");
+
+ // Get current binlog status
+ await ReadBinlogStatusAsync();
+
+ // Read binlog events
+ await ReadBinlogEventsAsync(binlogFile, position);
+ }
+ catch (OperationCanceledException)
+ {
+ LogMessage?.Invoke(this, "Binlog reading stopped");
+ }
+ catch (Exception ex)
+ {
+ LogMessage?.Invoke(this, $"Error reading binlog: {ex.Message}");
+ }
+ finally
+ {
+ _isReading = false;
+ }
+ }
+
+ public void StopReading()
+ {
+ _cancellationTokenSource?.Cancel();
+ _isReading = false;
+ }
+
+ public void Disconnect()
+ {
+ StopReading();
+ _connection?.Close();
+ _connection?.Dispose();
+ _isConnected = false;
+ }
+
+ private async Task ReadBinlogStatusAsync()
+ {
+ using var command = _connection.CreateCommand();
+ command.CommandText = "SHOW MASTER STATUS";
+
+ using var reader = await command.ExecuteReaderAsync();
+ if (await reader.ReadAsync())
+ {
+ var file = reader.GetString("File");
+ var position = reader.GetInt64("Position");
+
+ LogMessage?.Invoke(this, $"Current binlog: {file} at position {position}");
+
+ // Create a status event and store it
+ var statusEvent = new DatabaseEvent
+ {
+ Type = "status",
+ Table = "system",
+ Operation = "binlog_status",
+ Data = $"SHOW MASTER STATUS - File: {file}, Position: {position}",
+ BinlogPosition = (long)position,
+ ServerId = _config.ServerId
+ };
+
+ await _eventStore.StoreEventAsync(statusEvent);
+
+ // Create a binlog event for the status
+ var evt = new BinlogEvent
+ {
+ Timestamp = DateTimeOffset.UtcNow,
+ EventType = BinlogEventType.QUERY_EVENT,
+ LogPosition = (uint)position,
+ EventSize = 0,
+ Flags = 0,
+ EventData = Encoding.UTF8.GetBytes($"SHOW MASTER STATUS - File: {file}, Position: {position}"),
+ RawPacket = new byte[0]
+ };
+
+ EventReceived?.Invoke(this, evt);
+ }
+ }
+
+ private async Task ReadBinlogEventsAsync(string binlogFile, long position)
+ {
+ try
+ {
+ // Get the binlog file to read from
+ var targetFile = binlogFile;
+ if (string.IsNullOrEmpty(targetFile))
+ {
+ targetFile = await GetCurrentBinlogFileAsync();
+ }
+
+ LogMessage?.Invoke(this, $"Reading binlog events from {targetFile} starting at position {position}");
+
+ using var command = _connection.CreateCommand();
+ command.CommandText = $"SHOW BINLOG EVENTS IN '{targetFile}' FROM {position}";
+
+ LogMessage?.Invoke(this, $"Executing query: {command.CommandText}");
+
+ using var reader = await command.ExecuteReaderAsync();
+ var eventCount = 0;
+
+ LogMessage?.Invoke(this, "Starting to read binlog events...");
+
+ while (await reader.ReadAsync())
+ {
+ if (_cancellationTokenSource.Token.IsCancellationRequested)
+ break;
+
+ var logName = reader.GetString("Log_name");
+ var logPos = reader.GetInt64("Pos");
+ var eventType = reader.GetString("Event_type");
+ var serverId = reader.GetInt32("Server_id");
+ var endLogPos = reader.GetInt64("End_log_pos");
+ var info = reader.GetString("Info");
+
+ // Only log every 100 events to reduce console output
+ eventCount++;
+ if (eventCount % 100 == 0)
+ {
+ LogMessage?.Invoke(this, $"Processed {eventCount} events...");
+ }
+
+ // Parse event type
+ var binlogEventType = ParseEventType(eventType);
+
+ // Create and store database event
+ var databaseEvent = new DatabaseEvent
+ {
+ Type = "binlog",
+ Table = ExtractTableName(info),
+ Operation = ExtractOperation(eventType, info),
+ Data = info,
+ BinlogPosition = logPos,
+ ServerId = serverId
+ };
+
+ await _eventStore.StoreEventAsync(databaseEvent);
+
+ // Create binlog event for real-time processing
+ var evt = new BinlogEvent
+ {
+ Timestamp = DateTimeOffset.UtcNow,
+ EventType = binlogEventType,
+ LogPosition = (uint)logPos,
+ EventSize = (uint)(endLogPos - logPos),
+ Flags = 0,
+ EventData = Encoding.UTF8.GetBytes(info),
+ RawPacket = new byte[0]
+ };
+
+ EventReceived?.Invoke(this, evt);
+ }
+
+ LogMessage?.Invoke(this, $"Completed reading binlog events. Total events processed: {eventCount}");
+ }
+ catch (Exception ex)
+ {
+ LogMessage?.Invoke(this, $"Error reading binlog events: {ex.Message}");
+ throw;
+ }
+ }
+
+ private async Task GetCurrentBinlogFileAsync()
+ {
+ using var command = _connection.CreateCommand();
+ command.CommandText = "SHOW MASTER STATUS";
+
+ using var reader = await command.ExecuteReaderAsync();
+ if (await reader.ReadAsync())
+ {
+ return reader.GetString("File");
+ }
+
+ throw new Exception("Could not determine current binlog file");
+ }
+
+ private string ExtractTableName(string info)
+ {
+ // Try to extract table name from various event types
+ if (info.Contains("table_id:"))
+ {
+ var match = System.Text.RegularExpressions.Regex.Match(info, @"table_id: \d+ \(([^)]+)\)");
+ if (match.Success)
+ {
+ return match.Groups[1].Value;
+ }
+ }
+
+ // For query events, try to extract table name from SQL
+ if (info.Contains("INSERT INTO") || info.Contains("UPDATE") || info.Contains("DELETE FROM"))
+ {
+ var match = System.Text.RegularExpressions.Regex.Match(info, @"(?:INSERT INTO|UPDATE|DELETE FROM)\s+(\w+)");
+ if (match.Success)
+ {
+ return match.Groups[1].Value;
+ }
+ }
+
+ return "unknown";
+ }
+
+ private string ExtractOperation(string eventType, string info)
+ {
+ return eventType.ToUpper() switch
+ {
+ "WRITE_ROWS_V1" => "insert",
+ "UPDATE_ROWS_V1" => "update",
+ "DELETE_ROWS_V1" => "delete",
+ "QUERY" => "query",
+ "ANNOTATE_ROWS" => "query",
+ _ => eventType.ToLower()
+ };
+ }
+
+ private BinlogEventType ParseEventType(string eventType)
+ {
+ return eventType.ToUpper() switch
+ {
+ "QUERY" => BinlogEventType.QUERY_EVENT,
+ "XID" => BinlogEventType.XID_EVENT,
+ "GTID" => BinlogEventType.GTID_EVENT,
+ "TABLE_MAP" => BinlogEventType.TABLE_MAP_EVENT,
+ "WRITE_ROWS_V1" => BinlogEventType.WRITE_ROWS_EVENT_V1,
+ "UPDATE_ROWS_V1" => BinlogEventType.UPDATE_ROWS_EVENT_V1,
+ "DELETE_ROWS_V1" => BinlogEventType.DELETE_ROWS_EVENT_V1,
+ "ANNOTATE_ROWS" => BinlogEventType.QUERY_EVENT, // Treat as query event
+ "ROTATE" => BinlogEventType.ROTATE_EVENT,
+ "FORMAT_DESCRIPTION" => BinlogEventType.FORMAT_DESCRIPTION_EVENT,
+ _ => BinlogEventType.UNKNOWN_EVENT
+ };
+ }
+ }
+
+ public class BinlogEvent
+ {
+ public DateTimeOffset Timestamp { get; set; }
+ public BinlogEventType EventType { get; set; }
+ public uint LogPosition { get; set; }
+ public uint EventSize { get; set; }
+ public ushort Flags { get; set; }
+ public byte[] EventData { get; set; }
+ public byte[] RawPacket { get; set; }
+ }
+
    /// <summary>
    /// Binlog event type codes. The numeric values follow the MySQL replication
    /// protocol's event type enumeration — assumes they match the server's
    /// log_event header values; TODO confirm against the target MySQL version.
    /// </summary>
    public enum BinlogEventType : byte
    {
        UNKNOWN_EVENT = 0x00,
        START_EVENT_V3 = 0x01,
        QUERY_EVENT = 0x02,
        STOP_EVENT = 0x03,
        ROTATE_EVENT = 0x04,
        INTVAR_EVENT = 0x05,
        LOAD_EVENT = 0x06,
        SLAVE_EVENT = 0x07,
        CREATE_FILE_EVENT = 0x08,
        APPEND_BLOCK_EVENT = 0x09,
        EXEC_LOAD_EVENT = 0x0A,
        DELETE_FILE_EVENT = 0x0B,
        NEW_LOAD_EVENT = 0x0C,
        RAND_EVENT = 0x0D,
        USER_VAR_EVENT = 0x0E,
        FORMAT_DESCRIPTION_EVENT = 0x0F,
        XID_EVENT = 0x10,
        BEGIN_LOAD_QUERY_EVENT = 0x11,
        EXECUTE_LOAD_QUERY_EVENT = 0x12,
        TABLE_MAP_EVENT = 0x13,
        // V0/V1/V2 row events correspond to different server generations.
        WRITE_ROWS_EVENT_V0 = 0x14,
        UPDATE_ROWS_EVENT_V0 = 0x15,
        DELETE_ROWS_EVENT_V0 = 0x16,
        WRITE_ROWS_EVENT_V1 = 0x17,
        UPDATE_ROWS_EVENT_V1 = 0x18,
        DELETE_ROWS_EVENT_V1 = 0x19,
        INCIDENT_EVENT = 0x1A,
        HEARTBEAT_EVENT = 0x1B,
        IGNORABLE_EVENT = 0x1C,
        ROWS_QUERY_EVENT = 0x1D,
        WRITE_ROWS_EVENT_V2 = 0x1E,
        UPDATE_ROWS_EVENT_V2 = 0x1F,
        DELETE_ROWS_EVENT_V2 = 0x20,
        GTID_EVENT = 0x21,
        ANONYMOUS_GTID_EVENT = 0x22,
        PREVIOUS_GTIDS_EVENT = 0x23
    }
+}
\ No newline at end of file
diff --git a/Services/EncryptionService.cs b/Services/EncryptionService.cs
new file mode 100644
index 0000000..b2393c7
--- /dev/null
+++ b/Services/EncryptionService.cs
@@ -0,0 +1,324 @@
+using System.Security.Cryptography;
+using System.Text;
+
+namespace DatabaseSnapshotsService.Services
+{
+ public class EncryptionService
+ {
+ private readonly string _encryptionKey;
+ private readonly bool _encryptionEnabled;
+
+ public EncryptionService(string? encryptionKey, bool encryptionEnabled = false)
+ {
+ _encryptionEnabled = encryptionEnabled;
+
+ if (encryptionEnabled && string.IsNullOrWhiteSpace(encryptionKey))
+ {
+ throw new ArgumentException("Encryption key is required when encryption is enabled");
+ }
+
+ _encryptionKey = encryptionKey ?? string.Empty;
+ }
+
+ public bool IsEncryptionEnabled => _encryptionEnabled;
+
+ ///
+ /// Encrypts data using AES-256-CBC
+ ///
+ /// Data to encrypt
+ /// Encrypted data as base64 string
+ public async Task EncryptAsync(string plaintext)
+ {
+ if (!_encryptionEnabled)
+ {
+ return plaintext;
+ }
+
+ if (string.IsNullOrEmpty(plaintext))
+ {
+ return string.Empty;
+ }
+
+ try
+ {
+ using var aes = Aes.Create();
+ aes.KeySize = 256;
+ aes.Mode = CipherMode.CBC;
+ aes.Padding = PaddingMode.PKCS7;
+
+ // Derive key from the provided encryption key
+ var key = DeriveKey(_encryptionKey, aes.KeySize / 8);
+ aes.Key = key;
+
+ // Generate random IV
+ aes.GenerateIV();
+
+ using var encryptor = aes.CreateEncryptor();
+ var plaintextBytes = Encoding.UTF8.GetBytes(plaintext);
+ var ciphertext = encryptor.TransformFinalBlock(plaintextBytes, 0, plaintextBytes.Length);
+
+ // Combine IV and ciphertext
+ var result = new byte[aes.IV.Length + ciphertext.Length];
+ Buffer.BlockCopy(aes.IV, 0, result, 0, aes.IV.Length);
+ Buffer.BlockCopy(ciphertext, 0, result, aes.IV.Length, ciphertext.Length);
+
+ return Convert.ToBase64String(result);
+ }
+ catch (Exception ex)
+ {
+ throw new InvalidOperationException($"Encryption failed: {ex.Message}", ex);
+ }
+ }
+
+ ///
+ /// Decrypts data using AES-256-CBC
+ ///
+ /// Encrypted data as base64 string
+ /// Decrypted data
+ public async Task DecryptAsync(string ciphertext)
+ {
+ if (!_encryptionEnabled)
+ {
+ return ciphertext;
+ }
+
+ if (string.IsNullOrEmpty(ciphertext))
+ {
+ return string.Empty;
+ }
+
+ try
+ {
+ var encryptedData = Convert.FromBase64String(ciphertext);
+
+ using var aes = Aes.Create();
+ aes.KeySize = 256;
+ aes.Mode = CipherMode.CBC;
+ aes.Padding = PaddingMode.PKCS7;
+
+ // Derive key from the provided encryption key
+ var key = DeriveKey(_encryptionKey, aes.KeySize / 8);
+ aes.Key = key;
+
+ // Extract IV and ciphertext
+ var ivSize = aes.IV.Length;
+ var ciphertextSize = encryptedData.Length - ivSize;
+
+ if (ciphertextSize < 0)
+ {
+ throw new ArgumentException("Invalid encrypted data format");
+ }
+
+ var iv = new byte[ivSize];
+ var ciphertextBytes = new byte[ciphertextSize];
+
+ Buffer.BlockCopy(encryptedData, 0, iv, 0, ivSize);
+ Buffer.BlockCopy(encryptedData, ivSize, ciphertextBytes, 0, ciphertextSize);
+
+ aes.IV = iv;
+
+ using var decryptor = aes.CreateDecryptor();
+ var plaintext = decryptor.TransformFinalBlock(ciphertextBytes, 0, ciphertextBytes.Length);
+
+ return Encoding.UTF8.GetString(plaintext);
+ }
+ catch (Exception ex)
+ {
+ throw new InvalidOperationException($"Decryption failed: {ex.Message}", ex);
+ }
+ }
+
+ ///
+ /// Encrypts a file
+ ///
+ /// Path to the source file
+ /// Path for the encrypted file
+ public async Task EncryptFileAsync(string sourceFilePath, string destinationFilePath)
+ {
+ if (!_encryptionEnabled)
+ {
+ // If encryption is disabled, just copy the file
+ File.Copy(sourceFilePath, destinationFilePath, true);
+ return;
+ }
+
+ try
+ {
+ using var sourceStream = File.OpenRead(sourceFilePath);
+ using var destinationStream = File.Create(destinationFilePath);
+
+ using var aes = Aes.Create();
+ aes.KeySize = 256;
+ aes.Mode = CipherMode.CBC;
+ aes.Padding = PaddingMode.PKCS7;
+
+ var key = DeriveKey(_encryptionKey, aes.KeySize / 8);
+ aes.Key = key;
+ aes.GenerateIV();
+
+ // Write IV to the beginning of the file
+ await destinationStream.WriteAsync(aes.IV);
+
+ using var encryptor = aes.CreateEncryptor();
+ using var cryptoStream = new CryptoStream(destinationStream, encryptor, CryptoStreamMode.Write);
+
+ await sourceStream.CopyToAsync(cryptoStream);
+ await cryptoStream.FlushFinalBlockAsync();
+ }
+ catch (Exception ex)
+ {
+ throw new InvalidOperationException($"File encryption failed: {ex.Message}", ex);
+ }
+ }
+
+ ///
+ /// Decrypts a file
+ ///
+ /// Path to the encrypted file
+ /// Path for the decrypted file
+ public async Task DecryptFileAsync(string sourceFilePath, string destinationFilePath)
+ {
+ if (!_encryptionEnabled)
+ {
+ // If encryption is disabled, just copy the file
+ File.Copy(sourceFilePath, destinationFilePath, true);
+ return;
+ }
+
+ try
+ {
+ using var sourceStream = File.OpenRead(sourceFilePath);
+ using var destinationStream = File.Create(destinationFilePath);
+
+ using var aes = Aes.Create();
+ aes.KeySize = 256;
+ aes.Mode = CipherMode.CBC;
+ aes.Padding = PaddingMode.PKCS7;
+
+ var key = DeriveKey(_encryptionKey, aes.KeySize / 8);
+ aes.Key = key;
+
+ // Read IV from the beginning of the file
+ var iv = new byte[aes.IV.Length];
+ await sourceStream.ReadAsync(iv);
+ aes.IV = iv;
+
+ using var decryptor = aes.CreateDecryptor();
+ using var cryptoStream = new CryptoStream(sourceStream, decryptor, CryptoStreamMode.Read);
+
+ await cryptoStream.CopyToAsync(destinationStream);
+ }
+ catch (Exception ex)
+ {
+ throw new InvalidOperationException($"File decryption failed: {ex.Message}", ex);
+ }
+ }
+
+ ///
+ /// Generates a secure encryption key
+ ///
+ /// Size of the key in bits (default: 256)
+ /// Base64 encoded encryption key
+ public static string GenerateEncryptionKey(int keySize = 256)
+ {
+ if (keySize != 128 && keySize != 192 && keySize != 256)
+ {
+ throw new ArgumentException("Key size must be 128, 192, or 256 bits");
+ }
+
+ using var aes = Aes.Create();
+ aes.KeySize = keySize;
+ aes.GenerateKey();
+
+ return Convert.ToBase64String(aes.Key);
+ }
+
+ ///
+ /// Validates an encryption key
+ ///
+ /// The encryption key to validate
+ /// True if the key is valid, false otherwise
+ public static bool ValidateEncryptionKey(string key)
+ {
+ if (string.IsNullOrWhiteSpace(key))
+ {
+ return false;
+ }
+
+ try
+ {
+ var keyBytes = Convert.FromBase64String(key);
+ return keyBytes.Length == 16 || keyBytes.Length == 24 || keyBytes.Length == 32; // 128, 192, or 256 bits
+ }
+ catch
+ {
+ return false;
+ }
+ }
+
+ ///
+ /// Derives a key from a password using PBKDF2
+ ///
+ /// The password to derive the key from
+ /// Size of the derived key in bytes
+ /// The derived key
+ private static byte[] DeriveKey(string password, int keySize)
+ {
+ // Create a deterministic salt from the password hash
+ using var sha256 = SHA256.Create();
+ var passwordHash = sha256.ComputeHash(Encoding.UTF8.GetBytes(password));
+ var salt = new byte[32];
+ Array.Copy(passwordHash, salt, Math.Min(passwordHash.Length, salt.Length));
+
+ using var pbkdf2 = new Rfc2898DeriveBytes(password, salt, 10000, HashAlgorithmName.SHA256);
+ return pbkdf2.GetBytes(keySize);
+ }
+
+ ///
+ /// Creates a checksum of encrypted data for integrity verification
+ ///
+ /// The data to create a checksum for
+ /// SHA-256 hash of the data
+ public static string CreateChecksum(byte[] data)
+ {
+ using var sha256 = SHA256.Create();
+ var hash = sha256.ComputeHash(data);
+ return Convert.ToBase64String(hash);
+ }
+
+ ///
+ /// Creates a checksum of a string
+ ///
+ /// The string to create a checksum for
+ /// SHA-256 hash of the string
+ public static string CreateChecksum(string data)
+ {
+ var bytes = Encoding.UTF8.GetBytes(data);
+ return CreateChecksum(bytes);
+ }
+
+ ///
+ /// Verifies the integrity of encrypted data
+ ///
+ /// The data to verify
+ /// The expected checksum
+ /// True if the checksum matches, false otherwise
+ public static bool VerifyChecksum(byte[] data, string expectedChecksum)
+ {
+ var actualChecksum = CreateChecksum(data);
+ return actualChecksum.Equals(expectedChecksum, StringComparison.Ordinal);
+ }
+
+ ///
+ /// Verifies the integrity of a string
+ ///
+ /// The string to verify
+ /// The expected checksum
+ /// True if the checksum matches, false otherwise
+ public static bool VerifyChecksum(string data, string expectedChecksum)
+ {
+ var actualChecksum = CreateChecksum(data);
+ return actualChecksum.Equals(expectedChecksum, StringComparison.Ordinal);
+ }
+ }
+}
\ No newline at end of file
diff --git a/Services/EventStore.cs b/Services/EventStore.cs
new file mode 100644
index 0000000..2363d34
--- /dev/null
+++ b/Services/EventStore.cs
@@ -0,0 +1,140 @@
+using System.Text;
+using System.Text.Json;
+using DatabaseSnapshotsService.Models;
+
+namespace DatabaseSnapshotsService.Services
+{
+ public class EventStore
+ {
+ private readonly EventStoreConfig _config;
+ private readonly string _eventsPath;
+ private readonly string _indexPath;
+ private readonly object _writeLock = new object();
+ private long _currentEventId = 0;
+ private string _currentEventFile = string.Empty;
+ private StreamWriter? _currentWriter;
+ private long _currentFileSize = 0;
+
+ public EventStore(EventStoreConfig config)
+ {
+ _config = config;
+ _eventsPath = Path.GetFullPath(config.Path);
+ _indexPath = Path.Combine(_eventsPath, "index");
+
+ // Ensure directories exist
+ Directory.CreateDirectory(_eventsPath);
+ Directory.CreateDirectory(_indexPath);
+
+ // Load next event ID and initialize current file
+ LoadNextEventId();
+ InitializeCurrentFile();
+ }
+
+ public async Task StoreEventAsync(DatabaseEvent evt)
+ {
+ lock (_writeLock)
+ {
+ evt.Id = ++_currentEventId;
+ evt.Timestamp = DateTimeOffset.UtcNow.ToUnixTimeSeconds();
+ evt.Checksum = CalculateEventChecksum(evt);
+
+ // Check if we need to rotate the file
+ if (_currentFileSize > _config.MaxFileSize || _currentWriter == null)
+ {
+ RotateEventFile();
+ }
+
+ // Write event to current file
+ var json = JsonSerializer.Serialize(evt);
+ _currentWriter?.WriteLine(json);
+ _currentWriter?.Flush();
+ _currentFileSize += json.Length + Environment.NewLine.Length;
+
+ // Update index
+ UpdateEventIndex(evt);
+
+ return evt.Id;
+ }
+ }
+
+ public async Task GetLastEventIdAsync()
+ {
+ var eventFiles = Directory.GetFiles(_eventsPath, "events_*.json");
+ long lastId = 0;
+
+ foreach (var file in eventFiles.OrderByDescending(f => f))
+ {
+ var lines = await File.ReadAllLinesAsync(file);
+ if (lines.Length > 0)
+ {
+ var lastLine = lines.Last();
+ try
+ {
+ var lastEvent = JsonSerializer.Deserialize(lastLine);
+ if (lastEvent != null && lastEvent.Id > lastId)
+ {
+ lastId = lastEvent.Id;
+ }
+ }
+ catch (JsonException)
+ {
+ // Skip malformed JSON
+ continue;
+ }
+ }
+ }
+
+ return lastId;
+ }
+
+ private void LoadNextEventId()
+ {
+ var lastId = GetLastEventIdAsync().GetAwaiter().GetResult();
+ _currentEventId = lastId;
+ }
+
+ private void InitializeCurrentFile()
+ {
+ var timestamp = DateTimeOffset.UtcNow.ToUnixTimeSeconds();
+ _currentEventFile = Path.Combine(_eventsPath, $"events_{timestamp}.json");
+ _currentWriter = new StreamWriter(_currentEventFile, true, Encoding.UTF8);
+ _currentFileSize = 0;
+ }
+
+ private void RotateEventFile()
+ {
+ // Close current writer and delete file if empty
+ if (_currentWriter != null)
+ {
+ _currentWriter.Close();
+ if (!string.IsNullOrEmpty(_currentEventFile) && File.Exists(_currentEventFile))
+ {
+ var fileInfo = new FileInfo(_currentEventFile);
+ if (fileInfo.Length == 0)
+ {
+ File.Delete(_currentEventFile);
+ }
+ }
+ }
+ _currentEventFile = Path.Combine(_eventsPath, $"events_{DateTime.UtcNow:yyyyMMdd_HHmmss}_{_currentEventId}.json");
+ _currentWriter = new StreamWriter(_currentEventFile, append: false, Encoding.UTF8);
+ _currentFileSize = 0;
+ }
+
+ private string CalculateEventChecksum(DatabaseEvent evt)
+ {
+ var data = $"{evt.Id}{evt.Timestamp}{evt.Type}{evt.Table}{evt.Operation}{evt.Data}{evt.BinlogPosition}{evt.ServerId}";
+ using var sha256 = System.Security.Cryptography.SHA256.Create();
+ var hash = sha256.ComputeHash(Encoding.UTF8.GetBytes(data));
+ return Convert.ToHexString(hash).ToLower();
+ }
+
+ private void UpdateEventIndex(DatabaseEvent evt)
+ {
+ var indexFile = Path.Combine(_indexPath, Path.GetFileNameWithoutExtension(_currentEventFile) + ".idx");
+ var indexEntry = $"{evt.Id},{evt.Timestamp},{evt.Table},{evt.Operation}";
+
+ File.AppendAllText(indexFile, indexEntry + Environment.NewLine);
+ }
+ }
+}
\ No newline at end of file
diff --git a/Services/OptimizedFileService.cs b/Services/OptimizedFileService.cs
new file mode 100644
index 0000000..95a0dc9
--- /dev/null
+++ b/Services/OptimizedFileService.cs
@@ -0,0 +1,172 @@
+using EasyCompressor;
+using System.Security.Cryptography;
+using System.Text;
+
+namespace DatabaseSnapshotsService.Services
+{
+    /// <summary>
+    /// File helpers tuned for snapshot workloads: streaming LZ4 (de)compression
+    /// via EasyCompressor, streaming SHA-256 checksums, and buffered read/write
+    /// with size-based tuning knobs.
+    /// </summary>
+    public class OptimizedFileService
+    {
+        private const int DefaultBufferSize = 64 * 1024;          // 64KB buffer
+        private const int LargeFileThreshold = 100 * 1024 * 1024; // 100MB
+        private const int ParallelThreshold = 50 * 1024 * 1024;   // 50MB
+        private readonly LZ4Compressor _lz4 = new LZ4Compressor();
+
+        /// <summary>
+        /// Streaming LZ4 compression of <paramref name="sourcePath"/> into
+        /// <paramref name="destinationPath"/> using EasyCompressor.LZ4.
+        /// </summary>
+        /// <exception cref="InvalidOperationException">Wraps any underlying failure.</exception>
+        public async Task CompressFileStreamingAsync(string sourcePath, string destinationPath, int bufferSize = DefaultBufferSize)
+        {
+            try
+            {
+                using var sourceStream = new FileStream(sourcePath, FileMode.Open, FileAccess.Read, FileShare.Read, bufferSize, FileOptions.Asynchronous);
+                using var destinationStream = new FileStream(destinationPath, FileMode.Create, FileAccess.Write, FileShare.None, bufferSize, FileOptions.Asynchronous);
+                // EasyCompressor's stream API is synchronous; run it off the caller's thread.
+                await Task.Run(() => _lz4.Compress(sourceStream, destinationStream));
+            }
+            catch (Exception ex)
+            {
+                throw new InvalidOperationException($"Streaming LZ4 compression failed: {ex.Message}", ex);
+            }
+        }
+
+        /// <summary>
+        /// Streaming LZ4 decompression (inverse of CompressFileStreamingAsync).
+        /// </summary>
+        /// <exception cref="InvalidOperationException">Wraps any underlying failure.</exception>
+        public async Task DecompressFileStreamingAsync(string sourcePath, string destinationPath, int bufferSize = DefaultBufferSize)
+        {
+            try
+            {
+                using var sourceStream = new FileStream(sourcePath, FileMode.Open, FileAccess.Read, FileShare.Read, bufferSize, FileOptions.Asynchronous);
+                using var destinationStream = new FileStream(destinationPath, FileMode.Create, FileAccess.Write, FileShare.None, bufferSize, FileOptions.Asynchronous);
+                await Task.Run(() => _lz4.Decompress(sourceStream, destinationStream));
+            }
+            catch (Exception ex)
+            {
+                throw new InvalidOperationException($"Streaming LZ4 decompression failed: {ex.Message}", ex);
+            }
+        }
+
+        /// <summary>
+        /// SHA-256 of a file computed incrementally (constant memory), returned
+        /// Base64-encoded.
+        /// </summary>
+        public async Task<string> CalculateChecksumStreamingAsync(string filePath, int bufferSize = DefaultBufferSize)
+        {
+            try
+            {
+                using var sha256 = SHA256.Create();
+                using var stream = new FileStream(filePath, FileMode.Open, FileAccess.Read, FileShare.Read, bufferSize, FileOptions.Asynchronous);
+
+                var buffer = new byte[bufferSize];
+                int bytesRead;
+                while ((bytesRead = await stream.ReadAsync(buffer, 0, buffer.Length)) > 0)
+                {
+                    sha256.TransformBlock(buffer, 0, bytesRead, null, 0);
+                }
+
+                // Finalize with an empty block (restores the dropped <byte> type argument).
+                sha256.TransformFinalBlock(Array.Empty<byte>(), 0, 0);
+                return Convert.ToBase64String(sha256.Hash!);
+            }
+            catch (Exception ex)
+            {
+                throw new InvalidOperationException($"Checksum calculation failed: {ex.Message}", ex);
+            }
+        }
+
+        /// <summary>
+        /// Checksum entry point for large files. The original implementation read
+        /// the entire file into a byte[] (OOM risk, int overflow for files over
+        /// 2GB, and a single ReadAsync that is not guaranteed to fill the
+        /// buffer). Large files now stream with a bigger buffer, which produces
+        /// the identical hash without those hazards.
+        /// </summary>
+        public async Task<string> CalculateChecksumParallelAsync(string filePath)
+        {
+            try
+            {
+                var fileInfo = new FileInfo(filePath);
+                if (fileInfo.Length < LargeFileThreshold)
+                {
+                    return await CalculateChecksumStreamingAsync(filePath);
+                }
+
+                // Buffer size does not affect the resulting SHA-256 value.
+                return await CalculateChecksumStreamingAsync(filePath, GetOptimalBufferSize(fileInfo.Length));
+            }
+            catch (Exception ex)
+            {
+                throw new InvalidOperationException($"Parallel checksum calculation failed: {ex.Message}", ex);
+            }
+        }
+
+        /// <summary>
+        /// Reads a whole file into memory. Large files are copied via an async
+        /// FileStream; small files use File.ReadAllBytesAsync directly.
+        /// NOTE(review): both paths still materialize the full file in memory —
+        /// callers of very large snapshots should prefer the streaming APIs.
+        /// </summary>
+        public async Task<byte[]> ReadFileOptimizedAsync(string filePath)
+        {
+            try
+            {
+                var fileInfo = new FileInfo(filePath);
+
+                if (fileInfo.Length > LargeFileThreshold)
+                {
+                    using var stream = new FileStream(filePath, FileMode.Open, FileAccess.Read, FileShare.Read, DefaultBufferSize, FileOptions.Asynchronous);
+                    using var memoryStream = new MemoryStream();
+                    await stream.CopyToAsync(memoryStream);
+                    return memoryStream.ToArray();
+                }
+
+                return await File.ReadAllBytesAsync(filePath);
+            }
+            catch (Exception ex)
+            {
+                throw new InvalidOperationException($"File reading failed: {ex.Message}", ex);
+            }
+        }
+
+        /// <summary>
+        /// Writes <paramref name="data"/> to <paramref name="filePath"/>,
+        /// truncating any existing file, and flushes before returning.
+        /// </summary>
+        public async Task WriteFileOptimizedAsync(string filePath, byte[] data, int bufferSize = DefaultBufferSize)
+        {
+            try
+            {
+                using var stream = new FileStream(filePath, FileMode.Create, FileAccess.Write, FileShare.None, bufferSize, FileOptions.Asynchronous);
+                await stream.WriteAsync(data, 0, data.Length);
+                await stream.FlushAsync();
+            }
+            catch (Exception ex)
+            {
+                throw new InvalidOperationException($"File writing failed: {ex.Message}", ex);
+            }
+        }
+
+        /// <summary>
+        /// Picks a buffer size appropriate for the file size: 8KB under 1MB,
+        /// 64KB under 100MB, 256KB otherwise.
+        /// </summary>
+        public static int GetOptimalBufferSize(long fileSize)
+        {
+            if (fileSize < 1024 * 1024)             // < 1MB
+                return 8 * 1024;
+            if (fileSize < 100 * 1024 * 1024)       // < 100MB
+                return 64 * 1024;
+            return 256 * 1024;
+        }
+
+        /// <summary>
+        /// True when the file is at or above the 50MB parallel-processing threshold.
+        /// </summary>
+        public static bool ShouldUseParallelProcessing(long fileSize)
+        {
+            return fileSize >= ParallelThreshold;
+        }
+    }
+}
\ No newline at end of file
diff --git a/Services/RecoveryService.cs b/Services/RecoveryService.cs
new file mode 100644
index 0000000..e2c65c7
--- /dev/null
+++ b/Services/RecoveryService.cs
@@ -0,0 +1,599 @@
+using System.Text.Json;
+using DatabaseSnapshotsService.Models;
+using MySqlConnector;
+using System.Text;
+
+namespace DatabaseSnapshotsService.Services
+{
+ public class RecoveryService
+ {
+ private readonly SnapshotConfiguration _config;
+ private readonly string _recoveryPointsPath;
+ private readonly string _eventsPath;
+ private readonly OptimizedFileService _fileService;
+ private readonly EncryptionService _encryptionService;
+ private int _nextPointId = 1;
+
+ /// <summary>
+ /// Builds the recovery service rooted at the configured event-store path:
+ /// recovery points live in a "recovery_points" subdirectory, encryption
+ /// settings mirror SnapshotService, and the next point id is seeded from
+ /// the points already on disk.
+ /// </summary>
+ public RecoveryService(SnapshotConfiguration config)
+ {
+ _config = config;
+ _recoveryPointsPath = Path.Combine(config.EventStore.Path, "recovery_points");
+ _eventsPath = config.EventStore.Path;
+ _fileService = new OptimizedFileService();
+
+ // Initialize encryption service - match SnapshotService pattern
+ _encryptionService = new EncryptionService(
+ config.Security.EncryptionKey,
+ config.Security.Encryption
+ );
+
+ // Ensure directories exist
+ Directory.CreateDirectory(_recoveryPointsPath);
+ Directory.CreateDirectory(_eventsPath);
+
+ // Load next ID from existing recovery points
+ LoadNextPointId();
+ }
+
+        /// <summary>
+        /// Creates and persists a uniquely-named recovery point capturing the
+        /// current total event count and last event id. (Restores the stripped
+        /// Task&lt;RecoveryPoint&gt; return type.)
+        /// </summary>
+        /// <exception cref="ArgumentException">A point named <paramref name="name"/> already exists.</exception>
+        public async Task<RecoveryPoint> CreateRecoveryPointAsync(string name, string? description = null)
+        {
+            // Lookup is by the logical Name stored inside the JSON, not the file name.
+            if (await GetRecoveryPointAsync(name) != null)
+            {
+                throw new ArgumentException($"Recovery point '{name}' already exists");
+            }
+
+            var point = new RecoveryPoint
+            {
+                Id = _nextPointId++,   // NOTE(review): not thread-safe — confirm single-threaded use
+                Name = name,
+                Timestamp = DateTimeOffset.UtcNow.ToUnixTimeSeconds(),
+                Description = description,
+                CreatedAt = DateTime.UtcNow,
+                EventCount = await GetTotalEventCountAsync(),
+                LastEventId = await GetLastEventIdAsync()
+            };
+
+            await SaveRecoveryPointAsync(point);
+
+            return point;
+        }
+
+        /// <summary>
+        /// Loads every readable recovery point, newest first. Unreadable files
+        /// are skipped with a console warning rather than failing the listing.
+        /// (Restores the stripped Task&lt;List&lt;RecoveryPoint&gt;&gt; return
+        /// type and Deserialize type argument.)
+        /// </summary>
+        public async Task<List<RecoveryPoint>> ListRecoveryPointsAsync()
+        {
+            var points = new List<RecoveryPoint>();
+
+            foreach (var file in Directory.GetFiles(_recoveryPointsPath, "*.json"))
+            {
+                try
+                {
+                    var jsonBytes = await _fileService.ReadFileOptimizedAsync(file);
+                    var json = Encoding.UTF8.GetString(jsonBytes);
+                    var point = JsonSerializer.Deserialize<RecoveryPoint>(json);
+                    if (point != null)
+                    {
+                        points.Add(point);
+                    }
+                }
+                catch (Exception ex)
+                {
+                    Console.WriteLine($"Warning: Could not load recovery point from {file}: {ex.Message}");
+                }
+            }
+
+            return points.OrderByDescending(p => p.CreatedAt).ToList();
+        }
+
+        /// <summary>
+        /// Finds a recovery point by its logical Name by scanning the stored
+        /// JSON files; returns null when no point matches. (Restores the
+        /// stripped Task&lt;RecoveryPoint?&gt; return type.)
+        /// </summary>
+        public async Task<RecoveryPoint?> GetRecoveryPointAsync(string name)
+        {
+            foreach (var file in Directory.GetFiles(_recoveryPointsPath, "*.json"))
+            {
+                try
+                {
+                    var jsonBytes = await _fileService.ReadFileOptimizedAsync(file);
+                    var json = Encoding.UTF8.GetString(jsonBytes);
+                    var point = JsonSerializer.Deserialize<RecoveryPoint>(json);
+                    if (point?.Name == name)
+                    {
+                        return point;
+                    }
+                }
+                catch (Exception ex)
+                {
+                    Console.WriteLine($"Warning: Could not load recovery point from {file}: {ex.Message}");
+                }
+            }
+
+            return null;
+        }
+
+        /// <summary>
+        /// Dry-run of a restore to <paramref name="timestamp"/>: chooses the base
+        /// snapshot, counts the events that would be replayed, and reports the
+        /// affected tables plus a rough duration estimate. (Restores the
+        /// stripped Task&lt;RestorePreview&gt; and List&lt;string&gt; type arguments.)
+        /// </summary>
+        public async Task<RestorePreview> PreviewRestoreAsync(long timestamp)
+        {
+            var preview = new RestorePreview
+            {
+                TargetTimestamp = timestamp,
+                EventCount = 0,
+                AffectedTables = new List<string>(),
+                EstimatedDuration = TimeSpan.Zero,
+                Warnings = new List<string>()
+            };
+
+            // The newest snapshot at or before the target time becomes the base.
+            var snapshotService = new SnapshotService(_config);
+            var snapshots = await snapshotService.ListSnapshotsAsync();
+            var closestSnapshot = snapshots
+                .Where(s => s.Timestamp <= timestamp)
+                .OrderByDescending(s => s.Timestamp)
+                .FirstOrDefault();
+
+            if (closestSnapshot != null)
+            {
+                preview.SnapshotId = closestSnapshot.Id;
+                preview.Warnings.Add($"Will use snapshot {closestSnapshot.Id} as base");
+            }
+            else
+            {
+                preview.Warnings.Add("No suitable snapshot found - will restore from scratch");
+            }
+
+            // Events between the base snapshot and the target would be replayed.
+            var events = await GetEventsInRangeAsync(closestSnapshot?.Timestamp ?? 0, timestamp);
+            preview.EventCount = events.Count;
+            preview.AffectedTables = events
+                .Select(e => e.Table)
+                .Distinct()
+                .ToList();
+
+            // Rough estimate: ~1ms per event.
+            preview.EstimatedDuration = TimeSpan.FromSeconds(events.Count * 0.001);
+
+            return preview;
+        }
+
+ /// <summary>
+ /// Restores the target database to its state at <paramref name="timestamp"/>:
+ /// restores the nearest full snapshot, replays dependent incremental
+ /// snapshots in order, then validates the result. Destructive — overwrites
+ /// the live database. Failures are logged and rethrown.
+ /// </summary>
+ public async Task RestoreAsync(long timestamp)
+ {
+ try
+ {
+ Console.WriteLine("=== PERFORMING ACTUAL RECOVERY ===");
+ Console.WriteLine("This will modify the target database!");
+ Console.WriteLine($"Starting restore to timestamp {timestamp}...");
+
+ // Find the target snapshot and build restore chain
+ var (targetSnapshot, restoreChain) = await BuildRestoreChainAsync(timestamp);
+ if (targetSnapshot == null)
+ {
+ throw new Exception($"No snapshot found for timestamp {timestamp}");
+ }
+
+ Console.WriteLine($"Target snapshot: {targetSnapshot.Id} ({targetSnapshot.Type})");
+ Console.WriteLine($"Restore chain: {restoreChain.Count} snapshots");
+
+ // Restore the full snapshot (first in chain — BuildRestoreChainAsync
+ // orders the chain oldest/full-first)
+ var fullSnapshot = restoreChain.First();
+ Console.WriteLine($"Restoring full snapshot {fullSnapshot.Id}...");
+ await RestoreFromSnapshotAsync(fullSnapshot);
+
+ // Apply incremental snapshots in order
+ var incrementals = restoreChain.Skip(1).ToList();
+ if (incrementals.Any())
+ {
+ Console.WriteLine($"Applying {incrementals.Count} incremental snapshots...");
+ foreach (var incremental in incrementals)
+ {
+ Console.WriteLine($"Applying incremental snapshot {incremental.Id}...");
+ await ApplyIncrementalSnapshotAsync(incremental);
+ }
+ }
+
+ // NOTE(review): individual binlog events after the last snapshot are
+ // not replayed here, although PreviewRestoreAsync counts them — confirm
+ // this is intentional.
+ Console.WriteLine("Validating restore...");
+ await ValidateRestoreAsync();
+ Console.WriteLine("Database validation passed");
+ Console.WriteLine("Restore completed successfully");
+ }
+ catch (Exception ex)
+ {
+ Console.WriteLine($"Restore failed: {ex.Message}");
+ throw;
+ }
+ }
+
+        /// <summary>
+        /// Finds the snapshot at or before <paramref name="timestamp"/> and
+        /// builds the ordered restore chain: the base full snapshot first,
+        /// followed by each dependent incremental up to the target. (Restores
+        /// the stripped List&lt;SnapshotInfo&gt; type arguments.)
+        /// </summary>
+        private async Task<(SnapshotInfo? TargetSnapshot, List<SnapshotInfo> RestoreChain)> BuildRestoreChainAsync(long timestamp)
+        {
+            var snapshotService = new SnapshotService(_config);
+            var snapshots = await snapshotService.ListSnapshotsAsync();
+
+            // Target = newest snapshot not after the requested time.
+            var targetSnapshot = snapshots
+                .Where(s => s.Timestamp <= timestamp)
+                .OrderByDescending(s => s.Timestamp)
+                .FirstOrDefault();
+
+            if (targetSnapshot == null)
+                return (null, new List<SnapshotInfo>());
+
+            var restoreChain = new List<SnapshotInfo>();
+
+            if (targetSnapshot.Type.Equals("Full", StringComparison.OrdinalIgnoreCase))
+            {
+                // Target is a full snapshot; it is the whole chain.
+                restoreChain.Add(targetSnapshot);
+            }
+            else
+            {
+                // Walk parent links backwards until the full snapshot is found,
+                // inserting at the front so the chain ends up oldest-first.
+                var current = targetSnapshot;
+                while (current != null)
+                {
+                    restoreChain.Insert(0, current);
+
+                    if (current.Type.Equals("Full", StringComparison.OrdinalIgnoreCase))
+                        break;
+
+                    // NOTE(review): a missing parent terminates the walk with no
+                    // full snapshot at the chain head — confirm metadata integrity.
+                    current = snapshots.FirstOrDefault(s => s.Id == current.ParentSnapshotId);
+                }
+            }
+
+            return (targetSnapshot, restoreChain);
+        }
+
+        /// <summary>
+        /// Pipes an incremental snapshot's SQL into the mysql CLI against the
+        /// configured database. Fixes two defects: a null return from
+        /// Process.Start previously made the method silently succeed, and the
+        /// password was passed as "-p&lt;password&gt;" on the command line where it is
+        /// visible to every local user via the process list — it now travels via
+        /// the MYSQL_PWD environment variable.
+        /// </summary>
+        /// <exception cref="FileNotFoundException">The snapshot's data file is missing.</exception>
+        private async Task ApplyIncrementalSnapshotAsync(SnapshotInfo incremental)
+        {
+            Console.WriteLine($"Applying incremental snapshot {incremental.Id}...");
+
+            if (!File.Exists(incremental.FilePath))
+            {
+                throw new FileNotFoundException($"Incremental snapshot file not found: {incremental.FilePath}");
+            }
+
+            // Decrypt/decompress as needed and obtain plain SQL text.
+            var sqlContent = await ReadSnapshotFileAsync(incremental.FilePath);
+
+            var connectionString = _config.ConnectionString;
+            var server = ExtractValue(connectionString, "Server") ?? "localhost";
+            var port = ExtractValue(connectionString, "Port") ?? "3306";
+            var database = ExtractValue(connectionString, "Database") ?? "trading_platform";
+            var userId = ExtractValue(connectionString, "Uid") ?? "root";
+            var password = ExtractValue(connectionString, "Pwd") ?? "";
+
+            var startInfo = new System.Diagnostics.ProcessStartInfo
+            {
+                FileName = "mysql",
+                Arguments = $"-h{server} -P{port} -u{userId} {database}",
+                RedirectStandardInput = true,
+                RedirectStandardOutput = true,
+                RedirectStandardError = true,
+                UseShellExecute = false,
+                CreateNoWindow = true
+            };
+            if (!string.IsNullOrEmpty(password))
+            {
+                // mysql reads MYSQL_PWD; keeps the secret out of the process list.
+                startInfo.EnvironmentVariables["MYSQL_PWD"] = password;
+            }
+
+            using var process = System.Diagnostics.Process.Start(startInfo)
+                ?? throw new InvalidOperationException("Failed to start the mysql client process");
+
+            // Feed the SQL via stdin, then close it so mysql sees end-of-input.
+            await process.StandardInput.WriteAsync(sqlContent);
+            await process.StandardInput.FlushAsync();
+            process.StandardInput.Close();
+
+            string stdOut = await process.StandardOutput.ReadToEndAsync();
+            string stdErr = await process.StandardError.ReadToEndAsync();
+            await process.WaitForExitAsync();
+
+            if (process.ExitCode != 0)
+            {
+                Console.WriteLine($"[mysql stdout] {stdOut}");
+                Console.WriteLine($"[mysql stderr] {stdErr}");
+                throw new Exception($"mysql failed with exit code {process.ExitCode}");
+            }
+        }
+
+        /// <summary>
+        /// Counts events across all event files (one JSON event per line).
+        /// Unreadable files are skipped with a console warning. (Restores the
+        /// stripped Task&lt;long&gt; return type.)
+        /// </summary>
+        private async Task<long> GetTotalEventCountAsync()
+        {
+            long totalCount = 0;
+
+            foreach (var file in Directory.GetFiles(_eventsPath, "events_*.json"))
+            {
+                try
+                {
+                    var lines = await File.ReadAllLinesAsync(file);
+                    totalCount += lines.Length;
+                }
+                catch (Exception ex)
+                {
+                    Console.WriteLine($"Warning: Could not read event file {file}: {ex.Message}");
+                }
+            }
+
+            return totalCount;
+        }
+
+        /// <summary>
+        /// Returns the highest event id across all event files, or 0 when no
+        /// events exist. (Restores the stripped Task&lt;long&gt; return type and
+        /// the Deserialize&lt;DatabaseEvent&gt; type argument.)
+        /// </summary>
+        private async Task<long> GetLastEventIdAsync()
+        {
+            long lastId = 0;
+
+            foreach (var file in Directory.GetFiles(_eventsPath, "events_*.json").OrderByDescending(f => f))
+            {
+                try
+                {
+                    var lines = await File.ReadAllLinesAsync(file);
+                    if (lines.Length > 0)
+                    {
+                        // Event files are append-only, so the last line holds the
+                        // file's newest event.
+                        var lastEvent = JsonSerializer.Deserialize<DatabaseEvent>(lines.Last());
+                        if (lastEvent != null && lastEvent.Id > lastId)
+                        {
+                            lastId = lastEvent.Id;
+                        }
+                    }
+                }
+                catch (Exception ex)
+                {
+                    Console.WriteLine($"Warning: Could not read event file {file}: {ex.Message}");
+                }
+            }
+
+            return lastId;
+        }
+
+        /// <summary>
+        /// Collects events whose timestamp lies in [fromTimestamp, toTimestamp],
+        /// ordered by timestamp. (Restores the stripped
+        /// Task&lt;List&lt;DatabaseEvent&gt;&gt; / Deserialize type arguments.)
+        /// Parsing is now guarded per line instead of per file, so one malformed
+        /// or blank line no longer discards the rest of its file's events.
+        /// </summary>
+        private async Task<List<DatabaseEvent>> GetEventsInRangeAsync(long fromTimestamp, long toTimestamp)
+        {
+            var events = new List<DatabaseEvent>();
+
+            foreach (var file in Directory.GetFiles(_eventsPath, "events_*.json"))
+            {
+                string[] lines;
+                try
+                {
+                    lines = await File.ReadAllLinesAsync(file);
+                }
+                catch (Exception ex)
+                {
+                    Console.WriteLine($"Warning: Could not read event file {file}: {ex.Message}");
+                    continue;
+                }
+
+                foreach (var line in lines)
+                {
+                    if (string.IsNullOrWhiteSpace(line))
+                        continue;
+
+                    try
+                    {
+                        var evt = JsonSerializer.Deserialize<DatabaseEvent>(line);
+                        if (evt != null && evt.Timestamp >= fromTimestamp && evt.Timestamp <= toTimestamp)
+                        {
+                            events.Add(evt);
+                        }
+                    }
+                    catch (JsonException)
+                    {
+                        // Skip only the malformed line.
+                    }
+                }
+            }
+
+            return events.OrderBy(e => e.Timestamp).ToList();
+        }
+
+ /// <summary>
+ /// Restores the database from a single snapshot file. Delegates to the
+ /// programmatic path, which reverses encryption and compression.
+ /// </summary>
+ /// <exception cref="FileNotFoundException">The snapshot's data file is missing on disk.</exception>
+ private async Task RestoreFromSnapshotAsync(SnapshotInfo snapshot)
+ {
+ Console.WriteLine($"Restoring database from snapshot {snapshot.Id}...");
+
+ if (!File.Exists(snapshot.FilePath))
+ {
+ throw new FileNotFoundException($"Snapshot file not found: {snapshot.FilePath}");
+ }
+
+ // Use programmatic restoration (handles encryption/compression better)
+ await RestoreProgrammaticallyAsync(snapshot);
+ }
+
+        /// <summary>
+        /// Streams the snapshot's SQL into the mysql CLI via stdin. Removes two
+        /// defects: the original also wrote the SQL to a temp file it never used
+        /// (dead code that briefly spilled the full plaintext dump to disk), and
+        /// it passed the password as a "-p&lt;password&gt;" argument, visible in the
+        /// process list — it now travels via the MYSQL_PWD environment variable.
+        /// </summary>
+        private async Task RestoreProgrammaticallyAsync(SnapshotInfo snapshot)
+        {
+            // Read and decrypt/decompress the snapshot into plain SQL text.
+            var sqlContent = await ReadSnapshotFileAsync(snapshot.FilePath);
+
+            var connectionString = _config.ConnectionString;
+            var server = ExtractValue(connectionString, "Server") ?? "localhost";
+            var port = ExtractValue(connectionString, "Port") ?? "3306";
+            var database = ExtractValue(connectionString, "Database") ?? "trading_platform";
+            var userId = ExtractValue(connectionString, "Uid") ?? "root";
+            var password = ExtractValue(connectionString, "Pwd") ?? "";
+
+            var startInfo = new System.Diagnostics.ProcessStartInfo
+            {
+                FileName = "mysql",
+                Arguments = $"-h{server} -P{port} -u{userId} {database}",
+                RedirectStandardInput = true,
+                RedirectStandardOutput = true,
+                RedirectStandardError = true,
+                UseShellExecute = false,
+                CreateNoWindow = true
+            };
+            if (!string.IsNullOrEmpty(password))
+            {
+                // mysql reads MYSQL_PWD; keeps the secret off the command line.
+                startInfo.EnvironmentVariables["MYSQL_PWD"] = password;
+            }
+
+            using var process = new System.Diagnostics.Process { StartInfo = startInfo };
+
+            process.ErrorDataReceived += (sender, e) =>
+            {
+                if (!string.IsNullOrEmpty(e.Data))
+                {
+                    Console.WriteLine($"[mysql restore] {e.Data}");
+                }
+            };
+
+            if (!process.Start())
+            {
+                throw new InvalidOperationException("Failed to start the mysql client process");
+            }
+            process.BeginErrorReadLine();
+
+            // Send the SQL via stdin; disposing the writer closes the stream so
+            // mysql sees end-of-input.
+            using (var writer = process.StandardInput)
+            {
+                await writer.WriteAsync(sqlContent);
+                await writer.FlushAsync();
+            }
+
+            await process.WaitForExitAsync();
+
+            if (process.ExitCode != 0)
+            {
+                throw new Exception($"mysql restore failed with exit code {process.ExitCode}");
+            }
+
+            Console.WriteLine("Database restore completed successfully using mysql command");
+        }
+
+        /// <summary>
+        /// Reads a snapshot file back to SQL text, reversing encryption and/or
+        /// LZ4 compression based on the extension: ".lz4.enc" (encrypted then
+        /// compressed), ".lz4", ".enc", or plain text. (Restores the stripped
+        /// Task&lt;string&gt; return type; temp files in the single-step branches
+        /// were previously leaked when a later step threw — now always cleaned
+        /// up in finally blocks.)
+        /// </summary>
+        /// <exception cref="InvalidOperationException">Wraps any underlying failure.</exception>
+        private async Task<string> ReadSnapshotFileAsync(string filePath)
+        {
+            try
+            {
+                // NOTE(review): string.Replace swaps every occurrence of the
+                // marker, not just a trailing extension — confirm snapshot paths
+                // never contain these markers mid-name.
+                if (filePath.EndsWith(".lz4.enc"))
+                {
+                    // Encrypted after compression: decrypt first, then decompress.
+                    var decryptedPath = filePath.Replace(".lz4.enc", ".lz4.tmp");
+                    var decompressedPath = filePath.Replace(".lz4.enc", ".sql.tmp");
+
+                    try
+                    {
+                        await _encryptionService.DecryptFileAsync(filePath, decryptedPath);
+                        await _fileService.DecompressFileStreamingAsync(decryptedPath, decompressedPath);
+
+                        var content = await _fileService.ReadFileOptimizedAsync(decompressedPath);
+                        return Encoding.UTF8.GetString(content);
+                    }
+                    finally
+                    {
+                        if (File.Exists(decryptedPath)) File.Delete(decryptedPath);
+                        if (File.Exists(decompressedPath)) File.Delete(decompressedPath);
+                    }
+                }
+                else if (filePath.EndsWith(".lz4"))
+                {
+                    // Compressed only.
+                    var tempPath = filePath.Replace(".lz4", ".tmp");
+                    try
+                    {
+                        await _fileService.DecompressFileStreamingAsync(filePath, tempPath);
+                        var content = await _fileService.ReadFileOptimizedAsync(tempPath);
+                        return Encoding.UTF8.GetString(content);
+                    }
+                    finally
+                    {
+                        if (File.Exists(tempPath)) File.Delete(tempPath);
+                    }
+                }
+                else if (filePath.EndsWith(".enc"))
+                {
+                    // Encrypted only.
+                    var tempPath = filePath.Replace(".enc", ".tmp");
+                    try
+                    {
+                        await _encryptionService.DecryptFileAsync(filePath, tempPath);
+                        var content = await _fileService.ReadFileOptimizedAsync(tempPath);
+                        return Encoding.UTF8.GetString(content);
+                    }
+                    finally
+                    {
+                        if (File.Exists(tempPath)) File.Delete(tempPath);
+                    }
+                }
+                else
+                {
+                    // Plain SQL text.
+                    var content = await _fileService.ReadFileOptimizedAsync(filePath);
+                    return Encoding.UTF8.GetString(content);
+                }
+            }
+            catch (Exception ex)
+            {
+                throw new InvalidOperationException($"Failed to read snapshot file {filePath}: {ex.Message}", ex);
+            }
+        }
+
+        /// <summary>
+        /// Sanity-checks the restored database: the server must be reachable and
+        /// the current schema must contain at least one table.
+        /// </summary>
+        private async Task ValidateRestoreAsync()
+        {
+            using var connection = new MySqlConnection(_config.ConnectionString);
+            await connection.OpenAsync();
+
+            const string sql = "SELECT COUNT(*) FROM information_schema.tables WHERE table_schema = DATABASE()";
+            using var command = new MySqlCommand(sql, connection);
+
+            var tableCount = Convert.ToInt32(await command.ExecuteScalarAsync());
+            if (tableCount == 0)
+            {
+                throw new Exception("Database validation failed: No tables found after restore");
+            }
+        }
+
+ /// <summary>
+ /// Serializes a recovery point to "{Name}.json" under the recovery-points
+ /// directory. Lookups scan file contents for the Name, so the file name
+ /// itself is only a convenience.
+ /// </summary>
+ private async Task SaveRecoveryPointAsync(RecoveryPoint point)
+ {
+ // NOTE(review): point.Name is used verbatim as a file name — names with
+ // path separators or characters invalid on the host filesystem would fail
+ // or escape the directory; confirm upstream validation covers this.
+ var pointFile = Path.Combine(_recoveryPointsPath, $"{point.Name}.json");
+ var json = JsonSerializer.Serialize(point, new JsonSerializerOptions { WriteIndented = true });
+ var jsonBytes = Encoding.UTF8.GetBytes(json);
+
+ await _fileService.WriteFileOptimizedAsync(pointFile, jsonBytes);
+ }
+
+        /// <summary>
+        /// Seeds _nextPointId from the recovery points already on disk. The
+        /// original parsed file names as integers, but SaveRecoveryPointAsync
+        /// names files "{Name}.json", so the parse never matched and ids
+        /// restarted at 1 after every process restart (duplicate ids). The
+        /// stored Id is now read out of each point's JSON instead.
+        /// </summary>
+        private void LoadNextPointId()
+        {
+            var maxId = 0;
+            foreach (var file in Directory.GetFiles(_recoveryPointsPath, "*.json"))
+            {
+                try
+                {
+                    var point = JsonSerializer.Deserialize<RecoveryPoint>(File.ReadAllText(file));
+                    if (point != null && point.Id > maxId)
+                    {
+                        maxId = point.Id;
+                    }
+                }
+                catch (Exception)
+                {
+                    // Tolerate unreadable/malformed files, matching ListRecoveryPointsAsync.
+                }
+            }
+
+            _nextPointId = maxId + 1;
+        }
+
+        /// <summary>
+        /// Pulls a single "Key=Value" entry out of an ADO.NET-style connection
+        /// string (case-insensitive key match); null when the key is absent.
+        /// </summary>
+        private string? ExtractValue(string connectionString, string key)
+        {
+            foreach (var pair in connectionString.Split(';'))
+            {
+                // Split on the FIRST '=' only so values that themselves contain
+                // '=' (e.g. base64-padded passwords) are returned intact; the
+                // original Split('=') truncated such values.
+                var keyValue = pair.Split('=', 2);
+                if (keyValue.Length == 2 && keyValue[0].Trim().Equals(key, StringComparison.OrdinalIgnoreCase))
+                {
+                    return keyValue[1].Trim();
+                }
+            }
+            return null;
+        }
+ }
+}
\ No newline at end of file
diff --git a/Services/SnapshotService.cs b/Services/SnapshotService.cs
new file mode 100644
index 0000000..d155a7f
--- /dev/null
+++ b/Services/SnapshotService.cs
@@ -0,0 +1,633 @@
+using System.ComponentModel.DataAnnotations;
+using System.IO.Compression;
+using System.Security.Cryptography;
+using System.Text;
+using System.Text.Json;
+using DatabaseSnapshotsService.Models;
+using MySqlConnector;
+using DatabaseSnapshotsService.Services;
+
+namespace DatabaseSnapshotsService.Services
+{
+ public class SnapshotService
+ {
+ private readonly SnapshotConfiguration _config;
+ private readonly string _snapshotsPath;
+ private readonly string _metadataPath;
+ private readonly EncryptionService _encryptionService;
+ private readonly OptimizedFileService _fileService;
+ private int _nextId = 1;
+
+ /// <summary>
+ /// Builds the snapshot service: resolves the storage path (metadata lives in
+ /// a "metadata" subdirectory), wires up encryption and file helpers, ensures
+ /// the directories exist, and seeds the next snapshot id from existing
+ /// metadata on disk.
+ /// </summary>
+ public SnapshotService(SnapshotConfiguration config)
+ {
+ _config = config;
+ _snapshotsPath = Path.GetFullPath(config.SnapshotStorage.Path);
+ _metadataPath = Path.Combine(_snapshotsPath, "metadata");
+
+ // Initialize encryption service - match RecoveryService pattern
+ _encryptionService = new EncryptionService(
+ config.Security.EncryptionKey,
+ config.Security.Encryption
+ );
+
+ _fileService = new OptimizedFileService();
+
+ // Ensure directories exist
+ Directory.CreateDirectory(_snapshotsPath);
+ Directory.CreateDirectory(_metadataPath);
+
+ // Load next ID from existing snapshots
+ LoadNextId();
+ }
+
+        /// <summary>
+        /// Validates and sanitizes inputs, allocates the next snapshot id, runs
+        /// a full dump, and persists metadata. The stored Status progresses
+        /// Creating → Completed, or Failed when any step throws (the exception
+        /// is rethrown). (Restores the stripped Task&lt;SnapshotInfo&gt; return type.)
+        /// </summary>
+        /// <exception cref="ArgumentException">The name or description fails validation.</exception>
+        public async Task<SnapshotInfo> CreateSnapshotAsync(string name, string? description = null, int? userId = null)
+        {
+            var nameValidation = InputValidation.SnapshotValidation.ValidateSnapshotName(name);
+            if (nameValidation != ValidationResult.Success)
+            {
+                throw new ArgumentException($"Invalid snapshot name: {nameValidation.ErrorMessage}");
+            }
+
+            var sanitizedName = InputValidation.SanitizeString(name);
+            var sanitizedDescription = InputValidation.SanitizeString(description ?? string.Empty);
+
+            if (!string.IsNullOrEmpty(sanitizedDescription))
+            {
+                var descriptionValidation = InputValidation.SnapshotValidation.ValidateSnapshotDescription(sanitizedDescription);
+                if (descriptionValidation != ValidationResult.Success)
+                {
+                    throw new ArgumentException($"Invalid snapshot description: {descriptionValidation.ErrorMessage}");
+                }
+            }
+
+            var snapshot = new SnapshotInfo
+            {
+                Id = _nextId++,   // NOTE(review): not thread-safe — confirm single-threaded use
+                Timestamp = DateTimeOffset.UtcNow.ToUnixTimeSeconds(),
+                Status = SnapshotStatus.Creating.ToString(),
+                // The sanitized name is only retained when no description is supplied.
+                Description = string.IsNullOrEmpty(sanitizedDescription) ? sanitizedName : sanitizedDescription,
+                UserId = userId,
+                CreatedAt = DateTime.UtcNow
+            };
+
+            try
+            {
+                await CreateFullSnapshotAsync(snapshot);
+                await SaveSnapshotMetadataAsync(snapshot);
+                snapshot.Status = SnapshotStatus.Completed.ToString();
+                await SaveSnapshotMetadataAsync(snapshot);
+                return snapshot;
+            }
+            catch (Exception)
+            {
+                // Record the failure before propagating it.
+                snapshot.Status = SnapshotStatus.Failed.ToString();
+                await SaveSnapshotMetadataAsync(snapshot);
+                throw;
+            }
+        }
+
+        /// <summary>
+        /// Loads snapshot metadata records, optionally filtered by Type
+        /// (case-insensitive), newest first, capped at <paramref name="limit"/>.
+        /// Unreadable files are skipped with a console warning. (Restores the
+        /// stripped Task&lt;List&lt;SnapshotInfo&gt;&gt; / Deserialize type arguments.)
+        /// </summary>
+        public async Task<List<SnapshotInfo>> ListSnapshotsAsync(string? type = null, int limit = 50)
+        {
+            var snapshots = new List<SnapshotInfo>();
+
+            foreach (var file in Directory.GetFiles(_metadataPath, "*.json"))
+            {
+                try
+                {
+                    var json = await File.ReadAllTextAsync(file);
+                    var snapshot = JsonSerializer.Deserialize<SnapshotInfo>(json);
+
+                    if (snapshot != null && (string.IsNullOrEmpty(type) || snapshot.Type.Equals(type, StringComparison.OrdinalIgnoreCase)))
+                    {
+                        snapshots.Add(snapshot);
+                    }
+                }
+                catch (Exception ex)
+                {
+                    Console.WriteLine($"Warning: Could not load snapshot metadata from {file}: {ex.Message}");
+                }
+            }
+
+            return snapshots.OrderByDescending(s => s.CreatedAt).Take(limit).ToList();
+        }
+
+        /// <summary>
+        /// Loads one snapshot's metadata by id; returns null when the record is
+        /// missing or unreadable. (Restores the stripped Task&lt;SnapshotInfo?&gt;
+        /// return type and Deserialize type argument.)
+        /// </summary>
+        public async Task<SnapshotInfo?> GetSnapshotAsync(int id)
+        {
+            var metadataFile = Path.Combine(_metadataPath, $"{id}.json");
+
+            if (!File.Exists(metadataFile))
+                return null;
+
+            try
+            {
+                var json = await File.ReadAllTextAsync(metadataFile);
+                return JsonSerializer.Deserialize<SnapshotInfo>(json);
+            }
+            catch (Exception ex)
+            {
+                Console.WriteLine($"Error loading snapshot {id}: {ex.Message}");
+                return null;
+            }
+        }
+
+        /// <summary>
+        /// Removes a snapshot's data file and its metadata record.
+        /// </summary>
+        /// <exception cref="ArgumentException">No snapshot with the given id exists.</exception>
+        public async Task DeleteSnapshotAsync(int id)
+        {
+            var snapshot = await GetSnapshotAsync(id)
+                ?? throw new ArgumentException($"Snapshot {id} not found");
+
+            // Data file first, then the metadata record.
+            DeleteIfExists(snapshot.FilePath);
+            DeleteIfExists(Path.Combine(_metadataPath, $"{id}.json"));
+
+            static void DeleteIfExists(string path)
+            {
+                if (File.Exists(path))
+                {
+                    File.Delete(path);
+                }
+            }
+        }
+
+        /// <summary>
+        /// Produces a full database dump with mariadb-dump, records the server's
+        /// binlog coordinates, then checksums and optionally compresses/encrypts
+        /// the result, updating snapshot.FilePath/DataSize along the way.
+        /// Fixes: restores the stripped TaskCompletionSource&lt;bool&gt; and
+        /// List&lt;string&gt; type arguments; passes the password via MYSQL_PWD
+        /// instead of "-p&lt;password&gt;" (which was visible in the process list AND
+        /// echoed to the console in the "Command:" line); and closes the output
+        /// StreamWriter before the file is checksummed/compressed rather than
+        /// holding it open for the rest of the method.
+        /// </summary>
+        private async Task CreateFullSnapshotAsync(SnapshotInfo snapshot)
+        {
+            var fileName = $"snapshot_{snapshot.Id}_{snapshot.Timestamp}.sql";
+            var filePath = Path.Combine(_snapshotsPath, fileName);
+            snapshot.FilePath = filePath;
+
+            // Capture binlog coordinates first so later events can be replayed on top.
+            var binlogStatus = await GetCurrentBinlogStatusAsync();
+            snapshot.BinlogFile = binlogStatus.File;
+            snapshot.BinlogPosition = binlogStatus.Position;
+            snapshot.Type = "Full";
+
+            var connectionString = _config.ConnectionString;
+            var server = ExtractValue(connectionString, "Server") ?? "localhost";
+            var port = ExtractValue(connectionString, "Port") ?? "3306";
+            var database = ExtractValue(connectionString, "Database") ?? "trading_platform";
+            var dbUserId = ExtractValue(connectionString, "Uid") ?? "root";
+            var password = ExtractValue(connectionString, "Pwd") ?? "";
+
+            var dumpArgs = $"-h{server} -P{port} -u{dbUserId}";
+
+            // Performance and consistency switches, driven by configuration.
+            var optimizations = _config.SnapshotStorage.DumpOptimizations;
+            if (optimizations.SingleTransaction) dumpArgs += " --single-transaction";
+            if (optimizations.IncludeRoutines) dumpArgs += " --routines";
+            if (optimizations.IncludeTriggers) dumpArgs += " --triggers";
+            if (optimizations.IncludeEvents) dumpArgs += " --events";
+            if (optimizations.Quick) dumpArgs += " --quick";
+            if (optimizations.OrderByPrimary) dumpArgs += " --order-by-primary";
+            if (optimizations.FlushLogs) dumpArgs += " --flush-logs";
+            if (optimizations.MasterData > 0) dumpArgs += $" --master-data={optimizations.MasterData}";
+            if (optimizations.Compact) dumpArgs += " --compact";
+            if (optimizations.NoAutocommit) dumpArgs += " --no-autocommit";
+            dumpArgs += optimizations.LockTables ? " --lock-tables" : " --skip-lock-tables";
+            dumpArgs += " --add-drop-database";
+            dumpArgs += " --add-drop-table";
+            dumpArgs += " --create-options";
+            if (optimizations.ExtendedInsert) dumpArgs += " --extended-insert";
+            if (optimizations.CompleteInsert) dumpArgs += " --complete-insert";
+            if (optimizations.HexBlob) dumpArgs += " --hex-blob";
+            dumpArgs += $" --net_buffer_length={optimizations.NetBufferLength}";
+            dumpArgs += $" --max_allowed_packet={optimizations.MaxAllowedPacket}";
+            foreach (var table in optimizations.ExcludeTables)
+            {
+                dumpArgs += $" --ignore-table={database}.{table}";
+            }
+            if (optimizations.IncludeTables.Any())
+            {
+                // NOTE(review): mariadb-dump expects "db_name table1 table2 ...";
+                // here the tables precede the database (order preserved from the
+                // original) — confirm against the deployed client version.
+                dumpArgs += " --tables";
+                foreach (var table in optimizations.IncludeTables)
+                {
+                    dumpArgs += $" {table}";
+                }
+                dumpArgs += $" {database}";
+            }
+            else
+            {
+                dumpArgs += $" --databases {database}";
+            }
+
+            var startInfo = new System.Diagnostics.ProcessStartInfo
+            {
+                FileName = "mariadb-dump",
+                Arguments = dumpArgs,
+                RedirectStandardOutput = true,
+                RedirectStandardError = true,
+                UseShellExecute = false,
+                CreateNoWindow = true
+            };
+            if (!string.IsNullOrEmpty(password))
+            {
+                // mariadb-dump honors MYSQL_PWD; keeps the secret out of the
+                // process list and out of the console log below.
+                startInfo.EnvironmentVariables["MYSQL_PWD"] = password;
+            }
+
+            using var process = new System.Diagnostics.Process { StartInfo = startInfo };
+
+            var outputComplete = new TaskCompletionSource<bool>(TaskCreationOptions.RunContinuationsAsynchronously);
+            var errorComplete = new TaskCompletionSource<bool>(TaskCreationOptions.RunContinuationsAsynchronously);
+            var errorMessages = new List<string>();
+            var startTime = DateTime.UtcNow;
+
+            Console.WriteLine("Starting mariadb-dump with optimized settings...");
+            Console.WriteLine($"Command: mariadb-dump {dumpArgs}");
+
+            // The writer is scoped so it is closed (and fully flushed) before the
+            // dump file is checksummed, compressed or encrypted below.
+            using (var outputFile = new StreamWriter(filePath))
+            {
+                process.OutputDataReceived += (sender, e) =>
+                {
+                    if (e.Data == null)
+                    {
+                        // Null Data marks end-of-stream.
+                        outputComplete.SetResult(true);
+                    }
+                    else
+                    {
+                        outputFile.WriteLine(e.Data);
+                        outputFile.Flush(); // Ensure data is written immediately
+
+                        // Report progress for large dumps
+                        if (e.Data.StartsWith("-- Dump completed"))
+                        {
+                            var duration = DateTime.UtcNow - startTime;
+                            Console.WriteLine($"Dump completed in {duration.TotalSeconds:F1} seconds");
+                        }
+                    }
+                };
+
+                process.ErrorDataReceived += (sender, e) =>
+                {
+                    if (e.Data == null)
+                    {
+                        errorComplete.SetResult(true);
+                    }
+                    else
+                    {
+                        errorMessages.Add(e.Data);
+                        Console.WriteLine($"[mariadb-dump] {e.Data}");
+                    }
+                };
+
+                process.Start();
+                process.BeginOutputReadLine();
+                process.BeginErrorReadLine();
+
+                // Drain both streams before trusting the output file.
+                await Task.WhenAll(outputComplete.Task, errorComplete.Task);
+                await process.WaitForExitAsync();
+            }
+
+            if (process.ExitCode != 0)
+            {
+                var errorSummary = string.Join("; ", errorMessages.Take(5)); // Show first 5 errors
+                throw new Exception($"mariadb-dump failed with exit code {process.ExitCode}. Errors: {errorSummary}");
+            }
+
+            var totalDuration = DateTime.UtcNow - startTime;
+            Console.WriteLine($"Database dump completed successfully in {totalDuration.TotalSeconds:F1} seconds");
+
+            // Checksum and size of the raw dump.
+            snapshot.Checksum = await CalculateFileChecksumAsync(filePath);
+            snapshot.DataSize = new FileInfo(filePath).Length;
+
+            // Compress if enabled; FilePath/DataSize then track the ".lz4" file.
+            if (_config.SnapshotStorage.Compression)
+            {
+                await CompressFileAsync(filePath);
+                snapshot.FilePath = filePath + ".lz4";
+                snapshot.DataSize = new FileInfo(snapshot.FilePath).Length;
+            }
+
+            // Encrypt if enabled; FilePath/DataSize then track the ".enc" file.
+            if (_config.Security.Encryption && !string.IsNullOrEmpty(_config.Security.EncryptionKey))
+            {
+                var originalFilePath = snapshot.FilePath;
+                await EncryptFileAsync(snapshot.FilePath, _config.Security.EncryptionKey);
+                snapshot.FilePath = originalFilePath + ".enc";
+                snapshot.DataSize = new FileInfo(snapshot.FilePath).Length;
+            }
+        }
+
+ private string? ExtractValue(string connectionString, string key)
+ {
+ var pairs = connectionString.Split(';');
+ foreach (var pair in pairs)
+ {
+ var keyValue = pair.Split('=');
+ if (keyValue.Length == 2 && keyValue[0].Trim().Equals(key, StringComparison.OrdinalIgnoreCase))
+ {
+ return keyValue[1].Trim();
+ }
+ }
+ return null;
+ }
+
+ private async Task<(string File, long Position)> GetCurrentBinlogStatusAsync()
+ {
+ using var connection = new MySqlConnection(_config.ConnectionString);
+ await connection.OpenAsync();
+
+ using var command = new MySqlCommand("SHOW MASTER STATUS", connection);
+ using var reader = await command.ExecuteReaderAsync();
+
+ if (await reader.ReadAsync())
+ {
+ var file = reader.GetString("File");
+ var position = reader.GetInt64("Position");
+ return (file, position);
+ }
+
+ throw new Exception("Could not get current binlog status");
+ }
+
+ private async Task CalculateFileChecksumAsync(string filePath)
+ {
+ try
+ {
+ var fileInfo = new FileInfo(filePath);
+
+ // Use optimized checksum calculation based on file size
+ if (OptimizedFileService.ShouldUseParallelProcessing(fileInfo.Length))
+ {
+ return await _fileService.CalculateChecksumParallelAsync(filePath);
+ }
+ else
+ {
+ return await _fileService.CalculateChecksumStreamingAsync(filePath);
+ }
+ }
+ catch (Exception ex)
+ {
+ throw new InvalidOperationException($"Failed to calculate file checksum: {ex.Message}", ex);
+ }
+ }
+
+ private async Task CompressFileAsync(string filePath)
+ {
+ try
+ {
+ var fileInfo = new FileInfo(filePath);
+ var compressedPath = filePath + ".lz4";
+
+ // Use optimized LZ4 compression based on file size
+ if (OptimizedFileService.ShouldUseParallelProcessing(fileInfo.Length))
+ {
+ await _fileService.CompressFileStreamingAsync(filePath, compressedPath);
+ }
+ else
+ {
+ var bufferSize = OptimizedFileService.GetOptimalBufferSize(fileInfo.Length);
+ await _fileService.CompressFileStreamingAsync(filePath, compressedPath, bufferSize);
+ }
+
+ // Delete the original uncompressed file
+ File.Delete(filePath);
+ }
+ catch (Exception ex)
+ {
+ throw new InvalidOperationException($"File compression failed: {ex.Message}", ex);
+ }
+ }
+
+ private async Task EncryptFileAsync(string filePath, string key)
+ {
+ try
+ {
+ // Use the encryption service for file encryption
+ var encryptedFilePath = filePath + ".enc";
+ await _encryptionService.EncryptFileAsync(filePath, encryptedFilePath);
+
+ // Delete the original file and update the path
+ File.Delete(filePath);
+
+ // Update the snapshot file path to point to the encrypted file
+ // This will be handled by the calling method
+ }
+ catch (Exception ex)
+ {
+ throw new InvalidOperationException($"File encryption failed: {ex.Message}", ex);
+ }
+ }
+
+ private async Task SaveSnapshotMetadataAsync(SnapshotInfo snapshot)
+ {
+ try
+ {
+ var metadataFile = Path.Combine(_metadataPath, $"{snapshot.Id}.json");
+ var json = JsonSerializer.Serialize(snapshot, new JsonSerializerOptions { WriteIndented = true });
+ var jsonBytes = Encoding.UTF8.GetBytes(json);
+
+ // Use optimized file writing
+ await _fileService.WriteFileOptimizedAsync(metadataFile, jsonBytes);
+ }
+ catch (Exception ex)
+ {
+ throw new InvalidOperationException($"Failed to save snapshot metadata: {ex.Message}", ex);
+ }
+ }
+
+ private void LoadNextId()
+ {
+ var metadataFiles = Directory.GetFiles(_metadataPath, "*.json");
+ if (metadataFiles.Length > 0)
+ {
+ var maxId = metadataFiles
+ .Select(f => Path.GetFileNameWithoutExtension(f))
+ .Where(name => int.TryParse(name, out _))
+ .Select(int.Parse)
+ .Max();
+ _nextId = maxId + 1;
+ }
+ }
+
+ public async Task CreateIncrementalSnapshotAsync(string name, string? description = null, int? userId = null)
+ {
+ // Find the last snapshot (full or incremental)
+ var snapshots = await ListSnapshotsAsync();
+ var lastSnapshot = snapshots.OrderByDescending(s => s.CreatedAt).FirstOrDefault();
+ if (lastSnapshot == null)
+ {
+ throw new Exception("No previous snapshot found. Create a full snapshot first.");
+ }
+
+ // Get the binlog position from the last snapshot
+ string? startBinlogFile;
+ long? startBinlogPosition;
+
+ if (lastSnapshot.Type.Equals("Full", StringComparison.OrdinalIgnoreCase))
+ {
+ startBinlogFile = lastSnapshot.BinlogFile;
+ startBinlogPosition = lastSnapshot.BinlogPosition;
+ }
+ else
+ {
+ // For incremental snapshots, use the end position as the start for the next incremental
+ startBinlogFile = lastSnapshot.IncrementalBinlogEndFile;
+ startBinlogPosition = lastSnapshot.IncrementalBinlogEndPosition;
+ }
+
+ if (string.IsNullOrEmpty(startBinlogFile) || startBinlogPosition == null)
+ {
+ throw new Exception("No previous snapshot with binlog info found. Create a full snapshot first.");
+ }
+
+ // Get current binlog status
+ var (endFile, endPos) = await GetCurrentBinlogStatusAsync();
+
+ // Prepare file paths
+ var snapshot = new SnapshotInfo
+ {
+ Id = _nextId++,
+ Timestamp = DateTimeOffset.UtcNow.ToUnixTimeSeconds(),
+ Status = SnapshotStatus.Creating.ToString(),
+ Description = description ?? name,
+ UserId = userId,
+ CreatedAt = DateTime.UtcNow,
+ Type = "Incremental",
+ ParentSnapshotId = lastSnapshot.Id,
+ IncrementalBinlogStartFile = startBinlogFile,
+ IncrementalBinlogStartPosition = startBinlogPosition,
+ IncrementalBinlogEndFile = endFile,
+ IncrementalBinlogEndPosition = endPos
+ };
+
+ var fileName = $"inc_{snapshot.Id}_{snapshot.Timestamp}.binlog";
+ var filePath = Path.Combine(_snapshotsPath, fileName);
+ snapshot.FilePath = filePath;
+
+ // Use mysqlbinlog to extract the binlog segment (no --raw, redirect output to file)
+ // Extract connection details from configuration
+ var connectionString = _config.ConnectionString;
+ var server = ExtractValue(connectionString, "Server") ?? "localhost";
+ var port = ExtractValue(connectionString, "Port") ?? "3306";
+ var database = ExtractValue(connectionString, "Database") ?? "trading_platform";
+ var dbUserId = ExtractValue(connectionString, "Uid") ?? "root";
+ var password = ExtractValue(connectionString, "Pwd") ?? "";
+
+ var args = $"--read-from-remote-server --host={server} --port={port} --user={dbUserId}";
+ if (!string.IsNullOrEmpty(password))
+ {
+ args += $" --password={password}";
+ }
+ args += $" --start-position={startBinlogPosition} --stop-position={endPos} {startBinlogFile}";
+ if (startBinlogFile != endFile)
+ {
+ // If binlog rotated, need to handle multiple files (not implemented here for brevity)
+ throw new NotImplementedException("Incremental snapshot across multiple binlog files is not yet supported.");
+ }
+
+ var startInfo = new System.Diagnostics.ProcessStartInfo
+ {
+ FileName = "mysqlbinlog",
+ Arguments = args,
+ RedirectStandardOutput = true,
+ RedirectStandardError = true,
+ UseShellExecute = false,
+ CreateNoWindow = true
+ };
+
+ using var process = new System.Diagnostics.Process { StartInfo = startInfo };
+ using var outputFile = new StreamWriter(filePath);
+ var error = new StringBuilder();
+ process.ErrorDataReceived += (sender, e) => { if (e.Data != null) error.AppendLine(e.Data); };
+ process.Start();
+ process.BeginErrorReadLine();
+ // Write stdout to file
+ while (!process.StandardOutput.EndOfStream)
+ {
+ var line = await process.StandardOutput.ReadLineAsync();
+ if (line != null)
+ await outputFile.WriteLineAsync(line);
+ }
+ await process.WaitForExitAsync();
+ outputFile.Close();
+ if (process.ExitCode != 0)
+ {
+ throw new Exception($"mysqlbinlog failed: {error}");
+ }
+
+ // Calculate checksum and size
+ snapshot.Checksum = await CalculateFileChecksumAsync(filePath);
+ snapshot.DataSize = new FileInfo(filePath).Length;
+
+ // Compress if enabled
+ if (_config.SnapshotStorage.Compression)
+ {
+ await CompressFileAsync(filePath);
+ snapshot.DataSize = new FileInfo(filePath + ".lz4").Length;
+ snapshot.FilePath = filePath + ".lz4";
+ }
+
+ // Encrypt if enabled
+ if (_config.Security.Encryption && !string.IsNullOrEmpty(_config.Security.EncryptionKey))
+ {
+ var originalFilePath = snapshot.FilePath;
+ await EncryptFileAsync(snapshot.FilePath, _config.Security.EncryptionKey);
+ // Update the file path to point to the encrypted file
+ snapshot.FilePath = originalFilePath + ".enc";
+ // Update the file size to reflect the encrypted file size
+ snapshot.DataSize = new FileInfo(snapshot.FilePath).Length;
+ }
+
+ // Save metadata
+ await SaveSnapshotMetadataAsync(snapshot);
+ snapshot.Status = SnapshotStatus.Completed.ToString();
+ await SaveSnapshotMetadataAsync(snapshot);
+ return snapshot;
+ }
+ }
+}
diff --git a/appsettings.test.json b/appsettings.test.json
new file mode 100644
index 0000000..0173e2d
--- /dev/null
+++ b/appsettings.test.json
@@ -0,0 +1,49 @@
+{
+ "connectionString": "Server=localhost;Port=3306;Database=trading_platform;Uid=root;Pwd=1;",
+ "binlogReader": {
+ "host": "localhost",
+ "port": 3306,
+ "username": "root",
+ "password": "1",
+ "serverId": 999,
+ "startPosition": 4,
+ "heartbeatInterval": 30
+ },
+ "snapshotStorage": {
+ "path": "./snapshots_test",
+ "compression": true,
+ "retentionDays": 7,
+ "maxFileSize": 104857600,
+ "dumpOptimizations": {
+ "singleTransaction": true,
+ "includeRoutines": true,
+ "includeTriggers": true,
+ "includeEvents": true,
+ "extendedInsert": true,
+ "completeInsert": true,
+ "hexBlob": true,
+ "netBufferLength": 16384,
+ "maxAllowedPacket": "1G",
+ "excludeTables": [],
+ "includeTables": [],
+ "quick": true,
+ "orderByPrimary": true,
+ "flushLogs": true,
+ "masterData": 2,
+ "compact": false,
+ "noAutocommit": false,
+ "lockTables": false
+ }
+ },
+ "eventStore": {
+ "path": "./events",
+ "maxFileSize": 52428800,
+ "retentionDays": 90,
+ "batchSize": 1000,
+ "flushInterval": 5
+ },
+ "security": {
+ "encryption": false,
+ "encryptionKey": null
+ }
+}
\ No newline at end of file
diff --git a/events_test/events_1751237015.json b/events_test/events_1751237015.json
new file mode 100644
index 0000000..e69de29
diff --git a/events_test/events_1751237031.json b/events_test/events_1751237031.json
new file mode 100644
index 0000000..e69de29
diff --git a/events_test/recovery_points/test_incremental_restore.json b/events_test/recovery_points/test_incremental_restore.json
new file mode 100644
index 0000000..83cb718
--- /dev/null
+++ b/events_test/recovery_points/test_incremental_restore.json
@@ -0,0 +1,9 @@
+{
+ "id": 1,
+ "name": "test_incremental_restore",
+ "timestamp": 1751237380,
+ "description": null,
+ "eventCount": 0,
+ "createdAt": "2025-06-29T22:49:40.92883Z",
+ "lastEventId": 0
+}
\ No newline at end of file