add project
This commit is contained in:
345
Services/BinlogReader.cs
Normal file
345
Services/BinlogReader.cs
Normal file
@ -0,0 +1,345 @@
|
||||
using System;
|
||||
using System.Collections.Generic;
|
||||
using System.IO;
|
||||
using System.Text;
|
||||
using System.Threading;
|
||||
using System.Threading.Tasks;
|
||||
using MySqlConnector;
|
||||
using DatabaseSnapshotsService.Models;
|
||||
|
||||
namespace DatabaseSnapshotsService.Services
|
||||
{
|
||||
public class BinlogReader
{
    private readonly BinlogReaderConfig _config;
    private readonly EventStore _eventStore;
    private MySqlConnection _connection;
    private bool _isConnected;
    private bool _isReading;
    private CancellationTokenSource _cancellationTokenSource;

    // Compiled once: these patterns run for every row returned by SHOW BINLOG EVENTS.
    private static readonly System.Text.RegularExpressions.Regex TableIdRegex =
        new System.Text.RegularExpressions.Regex(@"table_id: \d+ \(([^)]+)\)",
            System.Text.RegularExpressions.RegexOptions.Compiled);
    private static readonly System.Text.RegularExpressions.Regex SqlTableRegex =
        new System.Text.RegularExpressions.Regex(@"(?:INSERT INTO|UPDATE|DELETE FROM)\s+(\w+)",
            System.Text.RegularExpressions.RegexOptions.Compiled);
    // SHOW BINLOG EVENTS cannot take parameters, so the file name must be
    // whitelist-validated before being interpolated into the statement.
    private static readonly System.Text.RegularExpressions.Regex BinlogFileNameRegex =
        new System.Text.RegularExpressions.Regex(@"^[A-Za-z0-9._\-]+$",
            System.Text.RegularExpressions.RegexOptions.Compiled);

    /// <summary>Raised once per binlog event as it is read.</summary>
    public event EventHandler<BinlogEvent> EventReceived;

    /// <summary>Raised with human-readable progress and diagnostic messages.</summary>
    public event EventHandler<string> LogMessage;

    public BinlogReader(BinlogReaderConfig config, EventStore eventStore)
    {
        _config = config;
        _eventStore = eventStore;
    }

    /// <summary>
    /// Opens a connection to the MySQL server described by the configuration.
    /// </summary>
    /// <returns>
    /// True on success; false on failure (the error is reported via <see cref="LogMessage"/>).
    /// </returns>
    public async Task<bool> ConnectAsync()
    {
        try
        {
            LogMessage?.Invoke(this, $"Attempting to connect to {_config.Host}:{_config.Port}");

            // Use the builder so credentials containing ';' or '=' are escaped
            // correctly instead of silently corrupting the connection string.
            var builder = new MySqlConnectionStringBuilder
            {
                Server = _config.Host,
                Port = (uint)_config.Port,
                UserID = _config.Username,
                Password = _config.Password
            };
            _connection = new MySqlConnection(builder.ConnectionString);

            await _connection.OpenAsync();
            _isConnected = true;

            LogMessage?.Invoke(this, $"Connected to MySQL at {_config.Host}:{_config.Port}");
            return true;
        }
        catch (Exception ex)
        {
            LogMessage?.Invoke(this, $"Connection failed: {ex.Message}");
            return false;
        }
    }

    /// <summary>
    /// Reads binlog events, first logging the current master status, then
    /// iterating SHOW BINLOG EVENTS. Runs until the events are exhausted or
    /// <see cref="StopReading"/> is called.
    /// </summary>
    /// <param name="binlogFile">Binlog file to read; null means the server's current file.</param>
    /// <param name="position">Starting byte offset (4 skips the binlog magic header).</param>
    /// <exception cref="InvalidOperationException">Thrown when not connected.</exception>
    public async Task StartReadingAsync(string binlogFile = null, long position = 4)
    {
        if (!_isConnected)
        {
            throw new InvalidOperationException("Not connected to MySQL");
        }

        // Release the token source from any previous run before creating a new one.
        _cancellationTokenSource?.Dispose();
        _cancellationTokenSource = new CancellationTokenSource();
        _isReading = true;

        try
        {
            LogMessage?.Invoke(this, $"Starting binlog read from position {position}");

            // Get current binlog status
            await ReadBinlogStatusAsync();

            // Read binlog events
            await ReadBinlogEventsAsync(binlogFile, position);
        }
        catch (OperationCanceledException)
        {
            LogMessage?.Invoke(this, "Binlog reading stopped");
        }
        catch (Exception ex)
        {
            LogMessage?.Invoke(this, $"Error reading binlog: {ex.Message}");
        }
        finally
        {
            _isReading = false;
        }
    }

    /// <summary>Requests cancellation of an in-progress read loop.</summary>
    public void StopReading()
    {
        _cancellationTokenSource?.Cancel();
        _isReading = false;
    }

    /// <summary>Stops reading and releases the connection and cancellation resources.</summary>
    public void Disconnect()
    {
        StopReading();
        _cancellationTokenSource?.Dispose();
        _cancellationTokenSource = null;
        _connection?.Close();
        _connection?.Dispose();
        _isConnected = false;
    }

    /// <summary>
    /// Queries SHOW MASTER STATUS, logs the current coordinates, persists a
    /// synthetic "status" event and raises <see cref="EventReceived"/> for it.
    /// </summary>
    private async Task ReadBinlogStatusAsync()
    {
        using var command = _connection.CreateCommand();
        command.CommandText = "SHOW MASTER STATUS";

        using var reader = await command.ExecuteReaderAsync();
        if (await reader.ReadAsync())
        {
            var file = reader.GetString("File");
            var position = reader.GetInt64("Position");

            LogMessage?.Invoke(this, $"Current binlog: {file} at position {position}");

            // Create a status event and store it
            var statusEvent = new DatabaseEvent
            {
                Type = "status",
                Table = "system",
                Operation = "binlog_status",
                Data = $"SHOW MASTER STATUS - File: {file}, Position: {position}",
                BinlogPosition = position,
                ServerId = _config.ServerId
            };

            await _eventStore.StoreEventAsync(statusEvent);

            // Create a binlog event for the status
            var evt = new BinlogEvent
            {
                Timestamp = DateTimeOffset.UtcNow,
                EventType = BinlogEventType.QUERY_EVENT,
                LogPosition = (uint)position,
                EventSize = 0,
                Flags = 0,
                EventData = Encoding.UTF8.GetBytes($"SHOW MASTER STATUS - File: {file}, Position: {position}"),
                RawPacket = Array.Empty<byte>()
            };

            EventReceived?.Invoke(this, evt);
        }
    }

    /// <summary>
    /// Iterates SHOW BINLOG EVENTS for the given file/position, persisting a
    /// <c>DatabaseEvent</c> and raising <see cref="EventReceived"/> for each row.
    /// Honors cancellation requested via <see cref="StopReading"/>.
    /// </summary>
    private async Task ReadBinlogEventsAsync(string binlogFile, long position)
    {
        try
        {
            // Get the binlog file to read from
            var targetFile = binlogFile;
            if (string.IsNullOrEmpty(targetFile))
            {
                targetFile = await GetCurrentBinlogFileAsync();
            }

            // SHOW BINLOG EVENTS cannot be parameterized; reject anything that
            // does not look like a binlog file name to prevent SQL injection.
            if (!BinlogFileNameRegex.IsMatch(targetFile))
            {
                throw new ArgumentException($"Invalid binlog file name: '{targetFile}'");
            }

            LogMessage?.Invoke(this, $"Reading binlog events from {targetFile} starting at position {position}");

            using var command = _connection.CreateCommand();
            command.CommandText = $"SHOW BINLOG EVENTS IN '{targetFile}' FROM {position}";

            LogMessage?.Invoke(this, $"Executing query: {command.CommandText}");

            using var reader = await command.ExecuteReaderAsync();
            var eventCount = 0;

            LogMessage?.Invoke(this, "Starting to read binlog events...");

            while (await reader.ReadAsync())
            {
                if (_cancellationTokenSource.Token.IsCancellationRequested)
                    break;

                var logName = reader.GetString("Log_name");
                var logPos = reader.GetInt64("Pos");
                var eventType = reader.GetString("Event_type");
                var serverId = reader.GetInt32("Server_id");
                var endLogPos = reader.GetInt64("End_log_pos");
                var info = reader.GetString("Info");

                // Only log every 100 events to reduce console output
                eventCount++;
                if (eventCount % 100 == 0)
                {
                    LogMessage?.Invoke(this, $"Processed {eventCount} events...");
                }

                // Parse event type
                var binlogEventType = ParseEventType(eventType);

                // Create and store database event
                var databaseEvent = new DatabaseEvent
                {
                    Type = "binlog",
                    Table = ExtractTableName(info),
                    Operation = ExtractOperation(eventType, info),
                    Data = info,
                    BinlogPosition = logPos,
                    ServerId = serverId
                };

                await _eventStore.StoreEventAsync(databaseEvent);

                // Create binlog event for real-time processing
                var evt = new BinlogEvent
                {
                    Timestamp = DateTimeOffset.UtcNow,
                    EventType = binlogEventType,
                    LogPosition = (uint)logPos,
                    EventSize = (uint)(endLogPos - logPos),
                    Flags = 0,
                    EventData = Encoding.UTF8.GetBytes(info),
                    RawPacket = Array.Empty<byte>()
                };

                EventReceived?.Invoke(this, evt);
            }

            LogMessage?.Invoke(this, $"Completed reading binlog events. Total events processed: {eventCount}");
        }
        catch (Exception ex)
        {
            LogMessage?.Invoke(this, $"Error reading binlog events: {ex.Message}");
            throw;
        }
    }

    /// <summary>Returns the server's current binlog file name (SHOW MASTER STATUS).</summary>
    /// <exception cref="InvalidOperationException">Thrown when the server returns no status row.</exception>
    private async Task<string> GetCurrentBinlogFileAsync()
    {
        using var command = _connection.CreateCommand();
        command.CommandText = "SHOW MASTER STATUS";

        using var reader = await command.ExecuteReaderAsync();
        if (await reader.ReadAsync())
        {
            return reader.GetString("File");
        }

        throw new InvalidOperationException("Could not determine current binlog file");
    }

    /// <summary>
    /// Best-effort extraction of a table name from a binlog "Info" column:
    /// first from a row-event "table_id: N (db.table)" annotation, then from
    /// plain INSERT/UPDATE/DELETE SQL text; "unknown" when neither matches.
    /// </summary>
    private string ExtractTableName(string info)
    {
        if (string.IsNullOrEmpty(info))
        {
            return "unknown";
        }

        // Try to extract table name from various event types
        if (info.Contains("table_id:"))
        {
            var match = TableIdRegex.Match(info);
            if (match.Success)
            {
                return match.Groups[1].Value;
            }
        }

        // For query events, try to extract table name from SQL
        if (info.Contains("INSERT INTO") || info.Contains("UPDATE") || info.Contains("DELETE FROM"))
        {
            var match = SqlTableRegex.Match(info);
            if (match.Success)
            {
                return match.Groups[1].Value;
            }
        }

        return "unknown";
    }

    /// <summary>
    /// Maps a binlog event type name to the operation label stored on
    /// <c>DatabaseEvent</c>. The <paramref name="info"/> parameter is currently
    /// unused but kept for interface compatibility.
    /// </summary>
    private string ExtractOperation(string eventType, string info)
    {
        // Invariant casing: protocol tokens must not be subject to culture
        // rules (e.g. the Turkish dotless-I problem with ToUpper()).
        return eventType.ToUpperInvariant() switch
        {
            "WRITE_ROWS_V1" => "insert",
            "UPDATE_ROWS_V1" => "update",
            "DELETE_ROWS_V1" => "delete",
            "QUERY" => "query",
            "ANNOTATE_ROWS" => "query",
            _ => eventType.ToLowerInvariant()
        };
    }

    /// <summary>Maps a SHOW BINLOG EVENTS type name to <see cref="BinlogEventType"/>.</summary>
    private BinlogEventType ParseEventType(string eventType)
    {
        return eventType.ToUpperInvariant() switch
        {
            "QUERY" => BinlogEventType.QUERY_EVENT,
            "XID" => BinlogEventType.XID_EVENT,
            "GTID" => BinlogEventType.GTID_EVENT,
            "TABLE_MAP" => BinlogEventType.TABLE_MAP_EVENT,
            "WRITE_ROWS_V1" => BinlogEventType.WRITE_ROWS_EVENT_V1,
            "UPDATE_ROWS_V1" => BinlogEventType.UPDATE_ROWS_EVENT_V1,
            "DELETE_ROWS_V1" => BinlogEventType.DELETE_ROWS_EVENT_V1,
            "ANNOTATE_ROWS" => BinlogEventType.QUERY_EVENT, // Treat as query event
            "ROTATE" => BinlogEventType.ROTATE_EVENT,
            "FORMAT_DESCRIPTION" => BinlogEventType.FORMAT_DESCRIPTION_EVENT,
            _ => BinlogEventType.UNKNOWN_EVENT
        };
    }
}
|
||||
|
||||
/// <summary>
/// In-memory representation of a single binlog event, raised via
/// <c>BinlogReader.EventReceived</c>.
/// </summary>
public class BinlogEvent
{
    /// <summary>Wall-clock time the event was read (not the server-side event time).</summary>
    public DateTimeOffset Timestamp { get; set; }

    /// <summary>Parsed event type.</summary>
    public BinlogEventType EventType { get; set; }

    /// <summary>Byte offset of the event within its binlog file.</summary>
    public uint LogPosition { get; set; }

    /// <summary>Event size in bytes (end position minus start position).</summary>
    public uint EventSize { get; set; }

    /// <summary>Event flags. The readers in this file always set 0.</summary>
    public ushort Flags { get; set; }

    /// <summary>Event payload. Defaults to empty (never null) to spare callers a null check.</summary>
    public byte[] EventData { get; set; } = Array.Empty<byte>();

    /// <summary>Raw wire packet, when available. Defaults to empty (never null).</summary>
    public byte[] RawPacket { get; set; } = Array.Empty<byte>();
}
|
||||
|
||||
/// <summary>
/// MySQL/MariaDB binlog event type codes. Values mirror the wire-protocol
/// byte codes, hence the explicit <c>byte</c> backing and hex literals.
/// </summary>
public enum BinlogEventType : byte
{
    UNKNOWN_EVENT = 0x00,
    START_EVENT_V3 = 0x01,
    QUERY_EVENT = 0x02,
    STOP_EVENT = 0x03,
    ROTATE_EVENT = 0x04,
    INTVAR_EVENT = 0x05,
    LOAD_EVENT = 0x06,
    SLAVE_EVENT = 0x07,
    CREATE_FILE_EVENT = 0x08,
    APPEND_BLOCK_EVENT = 0x09,
    EXEC_LOAD_EVENT = 0x0A,
    DELETE_FILE_EVENT = 0x0B,
    NEW_LOAD_EVENT = 0x0C,
    RAND_EVENT = 0x0D,
    USER_VAR_EVENT = 0x0E,
    FORMAT_DESCRIPTION_EVENT = 0x0F,
    XID_EVENT = 0x10,
    BEGIN_LOAD_QUERY_EVENT = 0x11,
    EXECUTE_LOAD_QUERY_EVENT = 0x12,
    TABLE_MAP_EVENT = 0x13,
    // Row events come in three generations (V0/V1/V2) with distinct codes.
    WRITE_ROWS_EVENT_V0 = 0x14,
    UPDATE_ROWS_EVENT_V0 = 0x15,
    DELETE_ROWS_EVENT_V0 = 0x16,
    WRITE_ROWS_EVENT_V1 = 0x17,
    UPDATE_ROWS_EVENT_V1 = 0x18,
    DELETE_ROWS_EVENT_V1 = 0x19,
    INCIDENT_EVENT = 0x1A,
    HEARTBEAT_EVENT = 0x1B,
    IGNORABLE_EVENT = 0x1C,
    ROWS_QUERY_EVENT = 0x1D,
    WRITE_ROWS_EVENT_V2 = 0x1E,
    UPDATE_ROWS_EVENT_V2 = 0x1F,
    DELETE_ROWS_EVENT_V2 = 0x20,
    GTID_EVENT = 0x21,
    ANONYMOUS_GTID_EVENT = 0x22,
    PREVIOUS_GTIDS_EVENT = 0x23
}
|
||||
}
|
||||
324
Services/EncryptionService.cs
Normal file
324
Services/EncryptionService.cs
Normal file
@ -0,0 +1,324 @@
|
||||
using System.Security.Cryptography;
|
||||
using System.Text;
|
||||
|
||||
namespace DatabaseSnapshotsService.Services
|
||||
{
|
||||
public class EncryptionService
{
    private readonly string _encryptionKey;
    private readonly bool _encryptionEnabled;

    /// <summary>
    /// Creates the service. When <paramref name="encryptionEnabled"/> is true a
    /// non-blank key is mandatory; when false all operations are pass-through.
    /// </summary>
    /// <exception cref="ArgumentException">Encryption enabled but no key supplied.</exception>
    public EncryptionService(string? encryptionKey, bool encryptionEnabled = false)
    {
        _encryptionEnabled = encryptionEnabled;

        if (encryptionEnabled && string.IsNullOrWhiteSpace(encryptionKey))
        {
            throw new ArgumentException("Encryption key is required when encryption is enabled");
        }

        _encryptionKey = encryptionKey ?? string.Empty;
    }

    public bool IsEncryptionEnabled => _encryptionEnabled;

    /// <summary>
    /// Creates an AES-256-CBC instance keyed from the configured encryption key.
    /// The caller owns (and must dispose) the returned instance.
    /// </summary>
    private Aes CreateAes()
    {
        var aes = Aes.Create();
        aes.KeySize = 256;
        aes.Mode = CipherMode.CBC;
        aes.Padding = PaddingMode.PKCS7;
        aes.Key = DeriveKey(_encryptionKey, aes.KeySize / 8);
        return aes;
    }

    /// <summary>
    /// Encrypts data using AES-256-CBC. The random IV is prepended to the
    /// ciphertext before base64 encoding. Pass-through when encryption is disabled.
    /// </summary>
    /// <param name="plaintext">Data to encrypt</param>
    /// <returns>Encrypted data as base64 string</returns>
    /// <exception cref="InvalidOperationException">Wraps any underlying crypto failure.</exception>
    public async Task<string> EncryptAsync(string plaintext)
    {
        if (!_encryptionEnabled)
        {
            return plaintext;
        }

        if (string.IsNullOrEmpty(plaintext))
        {
            return string.Empty;
        }

        try
        {
            using var aes = CreateAes();

            // Fresh random IV per message; it is stored in the clear ahead of
            // the ciphertext (standard for CBC — the IV is not secret).
            aes.GenerateIV();

            using var encryptor = aes.CreateEncryptor();
            var plaintextBytes = Encoding.UTF8.GetBytes(plaintext);
            var ciphertext = encryptor.TransformFinalBlock(plaintextBytes, 0, plaintextBytes.Length);

            // Combine IV and ciphertext
            var result = new byte[aes.IV.Length + ciphertext.Length];
            Buffer.BlockCopy(aes.IV, 0, result, 0, aes.IV.Length);
            Buffer.BlockCopy(ciphertext, 0, result, aes.IV.Length, ciphertext.Length);

            return Convert.ToBase64String(result);
        }
        catch (Exception ex)
        {
            throw new InvalidOperationException($"Encryption failed: {ex.Message}", ex);
        }
    }

    /// <summary>
    /// Decrypts data produced by <see cref="EncryptAsync"/> (IV-prefixed
    /// AES-256-CBC, base64). Pass-through when encryption is disabled.
    /// </summary>
    /// <param name="ciphertext">Encrypted data as base64 string</param>
    /// <returns>Decrypted data</returns>
    /// <exception cref="InvalidOperationException">Wraps any underlying crypto failure.</exception>
    public async Task<string> DecryptAsync(string ciphertext)
    {
        if (!_encryptionEnabled)
        {
            return ciphertext;
        }

        if (string.IsNullOrEmpty(ciphertext))
        {
            return string.Empty;
        }

        try
        {
            var encryptedData = Convert.FromBase64String(ciphertext);

            using var aes = CreateAes();

            // The IV occupies the first block; the remainder is ciphertext.
            var ivSize = aes.IV.Length;
            var ciphertextSize = encryptedData.Length - ivSize;

            if (ciphertextSize < 0)
            {
                throw new ArgumentException("Invalid encrypted data format");
            }

            var iv = new byte[ivSize];
            var ciphertextBytes = new byte[ciphertextSize];

            Buffer.BlockCopy(encryptedData, 0, iv, 0, ivSize);
            Buffer.BlockCopy(encryptedData, ivSize, ciphertextBytes, 0, ciphertextSize);

            aes.IV = iv;

            using var decryptor = aes.CreateDecryptor();
            var plaintext = decryptor.TransformFinalBlock(ciphertextBytes, 0, ciphertextBytes.Length);

            return Encoding.UTF8.GetString(plaintext);
        }
        catch (Exception ex)
        {
            throw new InvalidOperationException($"Decryption failed: {ex.Message}", ex);
        }
    }

    /// <summary>
    /// Encrypts a file (streaming AES-256-CBC, IV written first). When
    /// encryption is disabled the file is simply copied.
    /// </summary>
    /// <param name="sourceFilePath">Path to the source file</param>
    /// <param name="destinationFilePath">Path for the encrypted file</param>
    /// <exception cref="InvalidOperationException">Wraps any underlying I/O or crypto failure.</exception>
    public async Task EncryptFileAsync(string sourceFilePath, string destinationFilePath)
    {
        if (!_encryptionEnabled)
        {
            // If encryption is disabled, just copy the file
            File.Copy(sourceFilePath, destinationFilePath, true);
            return;
        }

        try
        {
            using var sourceStream = File.OpenRead(sourceFilePath);
            using var destinationStream = File.Create(destinationFilePath);

            using var aes = CreateAes();
            aes.GenerateIV();

            // Write IV to the beginning of the file
            await destinationStream.WriteAsync(aes.IV);

            using var encryptor = aes.CreateEncryptor();
            using var cryptoStream = new CryptoStream(destinationStream, encryptor, CryptoStreamMode.Write);

            await sourceStream.CopyToAsync(cryptoStream);
            await cryptoStream.FlushFinalBlockAsync();
        }
        catch (Exception ex)
        {
            throw new InvalidOperationException($"File encryption failed: {ex.Message}", ex);
        }
    }

    /// <summary>
    /// Decrypts a file produced by <see cref="EncryptFileAsync"/>. When
    /// encryption is disabled the file is simply copied.
    /// </summary>
    /// <param name="sourceFilePath">Path to the encrypted file</param>
    /// <param name="destinationFilePath">Path for the decrypted file</param>
    /// <exception cref="InvalidOperationException">Wraps any underlying I/O or crypto failure.</exception>
    public async Task DecryptFileAsync(string sourceFilePath, string destinationFilePath)
    {
        if (!_encryptionEnabled)
        {
            // If encryption is disabled, just copy the file
            File.Copy(sourceFilePath, destinationFilePath, true);
            return;
        }

        try
        {
            using var sourceStream = File.OpenRead(sourceFilePath);
            using var destinationStream = File.Create(destinationFilePath);

            using var aes = CreateAes();

            // Read the IV from the beginning of the file. A single ReadAsync
            // call may return fewer bytes than requested, so read exactly.
            var iv = new byte[aes.IV.Length];
            await ReadExactAsync(sourceStream, iv);
            aes.IV = iv;

            using var decryptor = aes.CreateDecryptor();
            using var cryptoStream = new CryptoStream(sourceStream, decryptor, CryptoStreamMode.Read);

            await cryptoStream.CopyToAsync(destinationStream);
        }
        catch (Exception ex)
        {
            throw new InvalidOperationException($"File decryption failed: {ex.Message}", ex);
        }
    }

    /// <summary>
    /// Fills <paramref name="buffer"/> completely from the stream.
    /// Stream.ReadAsync is allowed to return fewer bytes than requested, so a
    /// single call is not a reliable way to read a fixed-size header.
    /// </summary>
    /// <exception cref="EndOfStreamException">Stream ended before the buffer was filled.</exception>
    private static async Task ReadExactAsync(Stream stream, byte[] buffer)
    {
        var offset = 0;
        while (offset < buffer.Length)
        {
            var read = await stream.ReadAsync(buffer, offset, buffer.Length - offset);
            if (read == 0)
            {
                throw new EndOfStreamException("Encrypted data is truncated");
            }
            offset += read;
        }
    }

    /// <summary>
    /// Generates a secure encryption key
    /// </summary>
    /// <param name="keySize">Size of the key in bits (default: 256)</param>
    /// <returns>Base64 encoded encryption key</returns>
    /// <exception cref="ArgumentException">Key size is not 128/192/256.</exception>
    public static string GenerateEncryptionKey(int keySize = 256)
    {
        if (keySize != 128 && keySize != 192 && keySize != 256)
        {
            throw new ArgumentException("Key size must be 128, 192, or 256 bits");
        }

        using var aes = Aes.Create();
        aes.KeySize = keySize;
        aes.GenerateKey();

        return Convert.ToBase64String(aes.Key);
    }

    /// <summary>
    /// Validates an encryption key
    /// </summary>
    /// <param name="key">The encryption key to validate</param>
    /// <returns>True if the key is valid base64 of a legal AES key length, false otherwise</returns>
    public static bool ValidateEncryptionKey(string key)
    {
        if (string.IsNullOrWhiteSpace(key))
        {
            return false;
        }

        try
        {
            var keyBytes = Convert.FromBase64String(key);
            return keyBytes.Length == 16 || keyBytes.Length == 24 || keyBytes.Length == 32; // 128, 192, or 256 bits
        }
        catch
        {
            return false;
        }
    }

    /// <summary>
    /// Derives a key from a password using PBKDF2 (SHA-256, 10000 iterations).
    /// NOTE(review): the salt is derived deterministically from the password
    /// itself, which weakens PBKDF2 (identical passwords yield identical keys
    /// and the salt adds no entropy). This cannot be changed without breaking
    /// decryption of existing data; new deployments should store a random salt.
    /// </summary>
    /// <param name="password">The password to derive the key from</param>
    /// <param name="keySize">Size of the derived key in bytes</param>
    /// <returns>The derived key</returns>
    private static byte[] DeriveKey(string password, int keySize)
    {
        // Create a deterministic salt from the password hash
        using var sha256 = SHA256.Create();
        var passwordHash = sha256.ComputeHash(Encoding.UTF8.GetBytes(password));
        var salt = new byte[32];
        Array.Copy(passwordHash, salt, Math.Min(passwordHash.Length, salt.Length));

        using var pbkdf2 = new Rfc2898DeriveBytes(password, salt, 10000, HashAlgorithmName.SHA256);
        return pbkdf2.GetBytes(keySize);
    }

    /// <summary>
    /// Creates a checksum of encrypted data for integrity verification
    /// </summary>
    /// <param name="data">The data to create a checksum for</param>
    /// <returns>SHA-256 hash of the data, base64 encoded</returns>
    public static string CreateChecksum(byte[] data)
    {
        using var sha256 = SHA256.Create();
        var hash = sha256.ComputeHash(data);
        return Convert.ToBase64String(hash);
    }

    /// <summary>
    /// Creates a checksum of a string (UTF-8 encoded before hashing)
    /// </summary>
    /// <param name="data">The string to create a checksum for</param>
    /// <returns>SHA-256 hash of the string, base64 encoded</returns>
    public static string CreateChecksum(string data)
    {
        var bytes = Encoding.UTF8.GetBytes(data);
        return CreateChecksum(bytes);
    }

    /// <summary>
    /// Verifies the integrity of encrypted data
    /// </summary>
    /// <param name="data">The data to verify</param>
    /// <param name="expectedChecksum">The expected checksum</param>
    /// <returns>True if the checksum matches, false otherwise</returns>
    public static bool VerifyChecksum(byte[] data, string expectedChecksum)
    {
        var actualChecksum = CreateChecksum(data);
        return actualChecksum.Equals(expectedChecksum, StringComparison.Ordinal);
    }

    /// <summary>
    /// Verifies the integrity of a string
    /// </summary>
    /// <param name="data">The string to verify</param>
    /// <param name="expectedChecksum">The expected checksum</param>
    /// <returns>True if the checksum matches, false otherwise</returns>
    public static bool VerifyChecksum(string data, string expectedChecksum)
    {
        var actualChecksum = CreateChecksum(data);
        return actualChecksum.Equals(expectedChecksum, StringComparison.Ordinal);
    }
}
|
||||
}
|
||||
140
Services/EventStore.cs
Normal file
140
Services/EventStore.cs
Normal file
@ -0,0 +1,140 @@
|
||||
using System.Text;
|
||||
using System.Text.Json;
|
||||
using DatabaseSnapshotsService.Models;
|
||||
|
||||
namespace DatabaseSnapshotsService.Services
|
||||
{
|
||||
public class EventStore : IDisposable
{
    private readonly EventStoreConfig _config;
    private readonly string _eventsPath;
    private readonly string _indexPath;
    private readonly object _writeLock = new object();
    private long _currentEventId = 0;
    private string _currentEventFile = string.Empty;
    private StreamWriter? _currentWriter;
    private long _currentFileSize = 0;

    /// <summary>
    /// Creates the store, ensuring the events/index directories exist and
    /// resuming the event-ID sequence from the newest event on disk.
    /// </summary>
    public EventStore(EventStoreConfig config)
    {
        _config = config;
        _eventsPath = Path.GetFullPath(config.Path);
        _indexPath = Path.Combine(_eventsPath, "index");

        // Ensure directories exist
        Directory.CreateDirectory(_eventsPath);
        Directory.CreateDirectory(_indexPath);

        // Load next event ID and initialize current file
        LoadNextEventId();
        InitializeCurrentFile();
    }

    /// <summary>
    /// Assigns an ID, timestamp and checksum to the event, appends it as one
    /// JSON line to the current event file (rotating when the size cap is
    /// reached), updates the index, and returns the assigned ID.
    /// The write is fully synchronous under the lock; the Task return type is
    /// kept for interface compatibility with async callers.
    /// </summary>
    public Task<long> StoreEventAsync(DatabaseEvent evt)
    {
        lock (_writeLock)
        {
            evt.Id = ++_currentEventId;
            evt.Timestamp = DateTimeOffset.UtcNow.ToUnixTimeSeconds();
            evt.Checksum = CalculateEventChecksum(evt);

            // Check if we need to rotate the file
            if (_currentFileSize > _config.MaxFileSize || _currentWriter == null)
            {
                RotateEventFile();
            }

            // Write event to current file
            var json = JsonSerializer.Serialize(evt);
            _currentWriter?.WriteLine(json);
            _currentWriter?.Flush();
            // Track the on-disk size in bytes, not characters: the writer is
            // UTF-8, so multi-byte characters would otherwise delay rotation.
            _currentFileSize += Encoding.UTF8.GetByteCount(json) + Environment.NewLine.Length;

            // Update index
            UpdateEventIndex(evt);

            return Task.FromResult(evt.Id);
        }
    }

    /// <summary>
    /// Scans the event files on disk and returns the highest stored event ID,
    /// or 0 when no events exist. Malformed JSON lines are skipped.
    /// </summary>
    public async Task<long> GetLastEventIdAsync()
    {
        var eventFiles = Directory.GetFiles(_eventsPath, "events_*.json");
        long lastId = 0;

        foreach (var file in eventFiles.OrderByDescending(f => f))
        {
            var lines = await File.ReadAllLinesAsync(file);
            if (lines.Length > 0)
            {
                var lastLine = lines.Last();
                try
                {
                    var lastEvent = JsonSerializer.Deserialize<DatabaseEvent>(lastLine);
                    if (lastEvent != null && lastEvent.Id > lastId)
                    {
                        lastId = lastEvent.Id;
                    }
                }
                catch (JsonException)
                {
                    // Skip malformed JSON
                    continue;
                }
            }
        }

        return lastId;
    }

    /// <summary>Flushes and closes the current event file writer.</summary>
    public void Dispose()
    {
        lock (_writeLock)
        {
            _currentWriter?.Flush();
            _currentWriter?.Dispose();
            _currentWriter = null;
        }
    }

    // Resumes the ID sequence from disk. Blocking on the async scan is
    // acceptable here because this only runs once, from the constructor.
    private void LoadNextEventId()
    {
        var lastId = GetLastEventIdAsync().GetAwaiter().GetResult();
        _currentEventId = lastId;
    }

    // Opens the initial event file (appending, so a restart within the same
    // second continues the existing file).
    private void InitializeCurrentFile()
    {
        var timestamp = DateTimeOffset.UtcNow.ToUnixTimeSeconds();
        _currentEventFile = Path.Combine(_eventsPath, $"events_{timestamp}.json");
        _currentWriter = new StreamWriter(_currentEventFile, true, Encoding.UTF8);
        _currentFileSize = 0;
    }

    // Closes the current file (deleting it when empty) and opens a fresh one
    // named with the current UTC time and the last assigned event ID.
    private void RotateEventFile()
    {
        // Close current writer and delete file if empty
        if (_currentWriter != null)
        {
            _currentWriter.Close();
            if (!string.IsNullOrEmpty(_currentEventFile) && File.Exists(_currentEventFile))
            {
                var fileInfo = new FileInfo(_currentEventFile);
                if (fileInfo.Length == 0)
                {
                    File.Delete(_currentEventFile);
                }
            }
        }
        _currentEventFile = Path.Combine(_eventsPath, $"events_{DateTime.UtcNow:yyyyMMdd_HHmmss}_{_currentEventId}.json");
        _currentWriter = new StreamWriter(_currentEventFile, append: false, Encoding.UTF8);
        _currentFileSize = 0;
    }

    // Integrity checksum over the event's identifying fields; stored alongside
    // the event so tampering/corruption can be detected later.
    private string CalculateEventChecksum(DatabaseEvent evt)
    {
        var data = $"{evt.Id}{evt.Timestamp}{evt.Type}{evt.Table}{evt.Operation}{evt.Data}{evt.BinlogPosition}{evt.ServerId}";
        using var sha256 = System.Security.Cryptography.SHA256.Create();
        var hash = sha256.ComputeHash(Encoding.UTF8.GetBytes(data));
        return Convert.ToHexString(hash).ToLower();
    }

    // Appends a CSV line (id,timestamp,table,operation) to the per-file index.
    private void UpdateEventIndex(DatabaseEvent evt)
    {
        var indexFile = Path.Combine(_indexPath, Path.GetFileNameWithoutExtension(_currentEventFile) + ".idx");
        var indexEntry = $"{evt.Id},{evt.Timestamp},{evt.Table},{evt.Operation}";

        File.AppendAllText(indexFile, indexEntry + Environment.NewLine);
    }
}
|
||||
}
|
||||
172
Services/OptimizedFileService.cs
Normal file
172
Services/OptimizedFileService.cs
Normal file
@ -0,0 +1,172 @@
|
||||
using EasyCompressor;
|
||||
using System.Security.Cryptography;
|
||||
using System.Text;
|
||||
|
||||
namespace DatabaseSnapshotsService.Services
|
||||
{
|
||||
public class OptimizedFileService
|
||||
{
|
||||
private const int DefaultBufferSize = 64 * 1024; // 64KB buffer
|
||||
private const int LargeFileThreshold = 100 * 1024 * 1024; // 100MB
|
||||
private const int ParallelThreshold = 50 * 1024 * 1024; // 50MB
|
||||
private readonly LZ4Compressor _lz4 = new LZ4Compressor();
|
||||
|
||||
/// <summary>
|
||||
/// Streaming LZ4 compression using EasyCompressor.LZ4
|
||||
/// </summary>
|
||||
public async Task CompressFileStreamingAsync(string sourcePath, string destinationPath, int bufferSize = DefaultBufferSize)
|
||||
{
|
||||
try
|
||||
{
|
||||
using var sourceStream = new FileStream(sourcePath, FileMode.Open, FileAccess.Read, FileShare.Read, bufferSize, FileOptions.Asynchronous);
|
||||
using var destinationStream = new FileStream(destinationPath, FileMode.Create, FileAccess.Write, FileShare.None, bufferSize, FileOptions.Asynchronous);
|
||||
await Task.Run(() => _lz4.Compress(sourceStream, destinationStream));
|
||||
}
|
||||
catch (Exception ex)
|
||||
{
|
||||
throw new InvalidOperationException($"Streaming LZ4 compression failed: {ex.Message}", ex);
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Streaming LZ4 decompression using EasyCompressor.LZ4
|
||||
/// </summary>
|
||||
public async Task DecompressFileStreamingAsync(string sourcePath, string destinationPath, int bufferSize = DefaultBufferSize)
|
||||
{
|
||||
try
|
||||
{
|
||||
using var sourceStream = new FileStream(sourcePath, FileMode.Open, FileAccess.Read, FileShare.Read, bufferSize, FileOptions.Asynchronous);
|
||||
using var destinationStream = new FileStream(destinationPath, FileMode.Create, FileAccess.Write, FileShare.None, bufferSize, FileOptions.Asynchronous);
|
||||
await Task.Run(() => _lz4.Decompress(sourceStream, destinationStream));
|
||||
}
|
||||
catch (Exception ex)
|
||||
{
|
||||
throw new InvalidOperationException($"Streaming LZ4 decompression failed: {ex.Message}", ex);
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Optimized checksum calculation using streaming
|
||||
/// </summary>
|
||||
public async Task<string> CalculateChecksumStreamingAsync(string filePath, int bufferSize = DefaultBufferSize)
|
||||
{
|
||||
try
|
||||
{
|
||||
using var sha256 = SHA256.Create();
|
||||
using var stream = new FileStream(filePath, FileMode.Open, FileAccess.Read, FileShare.Read, bufferSize, FileOptions.Asynchronous);
|
||||
|
||||
var buffer = new byte[bufferSize];
|
||||
int bytesRead;
|
||||
|
||||
while ((bytesRead = await stream.ReadAsync(buffer, 0, buffer.Length)) > 0)
|
||||
{
|
||||
sha256.TransformBlock(buffer, 0, bytesRead, null, 0);
|
||||
}
|
||||
|
||||
sha256.TransformFinalBlock(Array.Empty<byte>(), 0, 0);
|
||||
return Convert.ToBase64String(sha256.Hash!);
|
||||
}
|
||||
catch (Exception ex)
|
||||
{
|
||||
throw new InvalidOperationException($"Checksum calculation failed: {ex.Message}", ex);
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Parallel checksum calculation for large files
|
||||
/// </summary>
|
||||
public async Task<string> CalculateChecksumParallelAsync(string filePath)
|
||||
{
|
||||
try
|
||||
{
|
||||
var fileInfo = new FileInfo(filePath);
|
||||
if (fileInfo.Length < LargeFileThreshold)
|
||||
{
|
||||
return await CalculateChecksumStreamingAsync(filePath);
|
||||
}
|
||||
|
||||
// For large files, use parallel processing
|
||||
using var stream = new FileStream(filePath, FileMode.Open, FileAccess.Read, FileShare.Read, DefaultBufferSize, FileOptions.Asynchronous);
|
||||
var fileBytes = new byte[fileInfo.Length];
|
||||
await stream.ReadAsync(fileBytes, 0, (int)fileInfo.Length);
|
||||
|
||||
return await Task.Run(() =>
|
||||
{
|
||||
using var sha256 = SHA256.Create();
|
||||
return Convert.ToBase64String(sha256.ComputeHash(fileBytes));
|
||||
});
|
||||
}
|
||||
catch (Exception ex)
|
||||
{
|
||||
throw new InvalidOperationException($"Parallel checksum calculation failed: {ex.Message}", ex);
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
|
||||
/// Optimized file reading with memory mapping for very large files
|
||||
/// </summary>
|
||||
public async Task<byte[]> ReadFileOptimizedAsync(string filePath)
|
||||
{
|
||||
try
|
||||
{
|
||||
var fileInfo = new FileInfo(filePath);
|
||||
|
||||
if (fileInfo.Length > LargeFileThreshold)
|
||||
{
|
||||
// For very large files, use streaming
|
||||
using var stream = new FileStream(filePath, FileMode.Open, FileAccess.Read, FileShare.Read, DefaultBufferSize, FileOptions.Asynchronous);
|
||||
using var memoryStream = new MemoryStream();
|
||||
await stream.CopyToAsync(memoryStream);
|
||||
return memoryStream.ToArray();
|
||||
}
|
||||
else
|
||||
{
|
||||
// For smaller files, use direct read
|
||||
return await File.ReadAllBytesAsync(filePath);
|
||||
}
|
||||
}
|
||||
catch (Exception ex)
|
||||
{
|
||||
throw new InvalidOperationException($"File reading failed: {ex.Message}", ex);
|
||||
}
|
||||
}
|
||||
|
||||
/// <summary>
/// Writes a byte buffer to a file asynchronously, creating or truncating the target.
/// </summary>
/// <param name="filePath">Destination path.</param>
/// <param name="data">Bytes to write.</param>
/// <param name="bufferSize">Stream buffer size; defaults to DefaultBufferSize.</param>
/// <exception cref="InvalidOperationException">Wraps any underlying I/O failure.</exception>
public async Task WriteFileOptimizedAsync(string filePath, byte[] data, int bufferSize = DefaultBufferSize)
{
    try
    {
        using var output = new FileStream(filePath, FileMode.Create, FileAccess.Write, FileShare.None, bufferSize, FileOptions.Asynchronous);
        await output.WriteAsync(data, 0, data.Length);
        await output.FlushAsync();
    }
    catch (Exception ex)
    {
        throw new InvalidOperationException($"File writing failed: {ex.Message}", ex);
    }
}
|
||||
|
||||
/// <summary>
/// Picks a buffer size appropriate for a file of the given length:
/// 8 KB under 1 MB, 64 KB under 100 MB, 256 KB otherwise.
/// </summary>
/// <param name="fileSize">File length in bytes.</param>
/// <returns>The recommended buffer size in bytes.</returns>
public static int GetOptimalBufferSize(long fileSize) => fileSize switch
{
    < 1024 * 1024 => 8 * 1024,          // < 1MB
    < 100 * 1024 * 1024 => 64 * 1024,   // < 100MB
    _ => 256 * 1024,
};
|
||||
|
||||
/// <summary>
/// True when a file of the given size is large enough to warrant parallel processing.
/// </summary>
/// <param name="fileSize">File length in bytes.</param>
public static bool ShouldUseParallelProcessing(long fileSize) => fileSize >= ParallelThreshold;
|
||||
}
|
||||
}
|
||||
599
Services/RecoveryService.cs
Normal file
599
Services/RecoveryService.cs
Normal file
@ -0,0 +1,599 @@
|
||||
using System.Text.Json;
|
||||
using DatabaseSnapshotsService.Models;
|
||||
using MySqlConnector;
|
||||
using System.Text;
|
||||
|
||||
namespace DatabaseSnapshotsService.Services
|
||||
{
|
||||
public class RecoveryService
|
||||
{
|
||||
private readonly SnapshotConfiguration _config;
|
||||
private readonly string _recoveryPointsPath;
|
||||
private readonly string _eventsPath;
|
||||
private readonly OptimizedFileService _fileService;
|
||||
private readonly EncryptionService _encryptionService;
|
||||
private int _nextPointId = 1;
|
||||
|
||||
/// <summary>
/// Creates a RecoveryService rooted at the configured event-store path, wiring
/// file and encryption helpers and ensuring the storage directories exist.
/// </summary>
/// <param name="config">Service configuration (event-store path, security settings).</param>
public RecoveryService(SnapshotConfiguration config)
{
    _config = config;
    _eventsPath = config.EventStore.Path;
    _recoveryPointsPath = Path.Combine(config.EventStore.Path, "recovery_points");
    _fileService = new OptimizedFileService();

    // Same key/flag wiring as SnapshotService so both services can read each other's files.
    _encryptionService = new EncryptionService(
        config.Security.EncryptionKey,
        config.Security.Encryption
    );

    Directory.CreateDirectory(_recoveryPointsPath);
    Directory.CreateDirectory(_eventsPath);

    // Continue ID numbering from whatever recovery points already exist on disk.
    LoadNextPointId();
}
|
||||
|
||||
/// <summary>
/// Creates and persists a new named recovery point capturing the current
/// total event count and the last event id.
/// </summary>
/// <param name="name">Unique recovery point name.</param>
/// <param name="description">Optional free-text description.</param>
/// <exception cref="ArgumentException">A recovery point with the same name already exists.</exception>
public async Task<RecoveryPoint> CreateRecoveryPointAsync(string name, string? description = null)
{
    // Names are unique; refuse duplicates up front.
    if (await GetRecoveryPointAsync(name) != null)
    {
        throw new ArgumentException($"Recovery point '{name}' already exists");
    }

    var point = new RecoveryPoint
    {
        Id = _nextPointId++,
        Name = name,
        Timestamp = DateTimeOffset.UtcNow.ToUnixTimeSeconds(),
        Description = description,
        CreatedAt = DateTime.UtcNow,
        EventCount = await GetTotalEventCountAsync(),
        LastEventId = await GetLastEventIdAsync()
    };

    await SaveRecoveryPointAsync(point);
    return point;
}
|
||||
|
||||
/// <summary>
/// Loads every recovery point stored on disk, newest first.
/// Unreadable files are skipped with a console warning.
/// </summary>
public async Task<List<RecoveryPoint>> ListRecoveryPointsAsync()
{
    var points = new List<RecoveryPoint>();

    foreach (var file in Directory.GetFiles(_recoveryPointsPath, "*.json"))
    {
        try
        {
            var bytes = await _fileService.ReadFileOptimizedAsync(file);
            var point = JsonSerializer.Deserialize<RecoveryPoint>(Encoding.UTF8.GetString(bytes));
            if (point != null)
            {
                points.Add(point);
            }
        }
        catch (Exception ex)
        {
            Console.WriteLine($"Warning: Could not load recovery point from {file}: {ex.Message}");
        }
    }

    return points.OrderByDescending(p => p.CreatedAt).ToList();
}
|
||||
|
||||
/// <summary>
/// Finds a recovery point by name, or null when no stored point matches.
/// Unreadable files are skipped with a console warning.
/// </summary>
/// <param name="name">Exact recovery point name to look up.</param>
public async Task<RecoveryPoint?> GetRecoveryPointAsync(string name)
{
    foreach (var file in Directory.GetFiles(_recoveryPointsPath, "*.json"))
    {
        try
        {
            var bytes = await _fileService.ReadFileOptimizedAsync(file);
            var candidate = JsonSerializer.Deserialize<RecoveryPoint>(Encoding.UTF8.GetString(bytes));
            if (candidate?.Name == name)
            {
                return candidate;
            }
        }
        catch (Exception ex)
        {
            Console.WriteLine($"Warning: Could not load recovery point from {file}: {ex.Message}");
        }
    }

    return null;
}
|
||||
|
||||
/// <summary>
/// Builds a non-destructive preview of a restore to the given Unix timestamp:
/// which snapshot would serve as base, how many events would be replayed,
/// which tables are affected, and a rough duration estimate.
/// </summary>
/// <param name="timestamp">Target Unix timestamp (seconds).</param>
public async Task<RestorePreview> PreviewRestoreAsync(long timestamp)
{
    var preview = new RestorePreview
    {
        TargetTimestamp = timestamp,
        EventCount = 0,
        AffectedTables = new List<string>(),
        EstimatedDuration = TimeSpan.Zero,
        Warnings = new List<string>()
    };

    // The latest snapshot at or before the target timestamp becomes the base.
    var snapshotService = new SnapshotService(_config);
    var snapshots = await snapshotService.ListSnapshotsAsync();
    var baseSnapshot = snapshots
        .Where(s => s.Timestamp <= timestamp)
        .OrderByDescending(s => s.Timestamp)
        .FirstOrDefault();

    if (baseSnapshot == null)
    {
        preview.Warnings.Add("No suitable snapshot found - will restore from scratch");
    }
    else
    {
        preview.SnapshotId = baseSnapshot.Id;
        preview.Warnings.Add($"Will use snapshot {baseSnapshot.Id} as base");
    }

    // Events between the base snapshot and the target would be replayed.
    var events = await GetEventsInRangeAsync(baseSnapshot?.Timestamp ?? 0, timestamp);
    preview.EventCount = events.Count;
    preview.AffectedTables = events
        .Select(e => e.Table)
        .Distinct()
        .ToList();

    // Rough estimate: ~1 ms of replay time per event.
    preview.EstimatedDuration = TimeSpan.FromSeconds(events.Count * 0.001);

    return preview;
}
|
||||
|
||||
/// <summary>
/// Restores the target database to the given Unix timestamp: restores the base
/// full snapshot, replays each incremental snapshot in order, then validates.
/// WARNING: this modifies the target database.
/// </summary>
/// <param name="timestamp">Target Unix timestamp (seconds).</param>
/// <exception cref="Exception">No snapshot covers the timestamp, or a restore step failed.</exception>
public async Task RestoreAsync(long timestamp)
{
    try
    {
        Console.WriteLine("=== PERFORMING ACTUAL RECOVERY ===");
        Console.WriteLine("This will modify the target database!");
        Console.WriteLine($"Starting restore to timestamp {timestamp}...");

        var (targetSnapshot, restoreChain) = await BuildRestoreChainAsync(timestamp);
        if (targetSnapshot == null)
        {
            throw new Exception($"No snapshot found for timestamp {timestamp}");
        }

        Console.WriteLine($"Target snapshot: {targetSnapshot.Id} ({targetSnapshot.Type})");
        Console.WriteLine($"Restore chain: {restoreChain.Count} snapshots");

        // The chain always begins with a full snapshot, followed by zero or more incrementals.
        var fullSnapshot = restoreChain.First();
        Console.WriteLine($"Restoring full snapshot {fullSnapshot.Id}...");
        await RestoreFromSnapshotAsync(fullSnapshot);

        var incrementals = restoreChain.Skip(1).ToList();
        if (incrementals.Any())
        {
            Console.WriteLine($"Applying {incrementals.Count} incremental snapshots...");
            foreach (var incremental in incrementals)
            {
                Console.WriteLine($"Applying incremental snapshot {incremental.Id}...");
                await ApplyIncrementalSnapshotAsync(incremental);
            }
        }

        Console.WriteLine("Validating restore...");
        await ValidateRestoreAsync();
        Console.WriteLine("Database validation passed");
        Console.WriteLine("Restore completed successfully");
    }
    catch (Exception ex)
    {
        Console.WriteLine($"Restore failed: {ex.Message}");
        throw;
    }
}
|
||||
|
||||
/// <summary>
/// Finds the snapshot closest to (at or before) the target timestamp and the
/// ordered chain needed to restore it: the base full snapshot first, then each
/// incremental up to and including the target.
/// </summary>
/// <param name="timestamp">Target Unix timestamp (seconds).</param>
/// <returns>The target snapshot (or null when none qualifies) and the restore chain.</returns>
/// <exception cref="InvalidOperationException">The snapshot parent chain contains a cycle.</exception>
private async Task<(SnapshotInfo? TargetSnapshot, List<SnapshotInfo> RestoreChain)> BuildRestoreChainAsync(long timestamp)
{
    var snapshotService = new SnapshotService(_config);
    var snapshots = await snapshotService.ListSnapshotsAsync();

    // Target is the latest snapshot at or before the requested timestamp.
    var targetSnapshot = snapshots
        .Where(s => s.Timestamp <= timestamp)
        .OrderByDescending(s => s.Timestamp)
        .FirstOrDefault();

    if (targetSnapshot == null)
        return (null, new List<SnapshotInfo>());

    var restoreChain = new List<SnapshotInfo>();

    if (targetSnapshot.Type.Equals("Full", StringComparison.OrdinalIgnoreCase))
    {
        // Target is already a full snapshot; nothing else is needed.
        restoreChain.Add(targetSnapshot);
        return (targetSnapshot, restoreChain);
    }

    // Target is incremental: walk parent links back to the full snapshot,
    // building the chain front-to-back.
    // BUG FIX: guard against cyclic/self-referencing parent links, which
    // previously made this loop run forever.
    var visited = new HashSet<int>();
    var current = targetSnapshot;
    while (current != null)
    {
        if (!visited.Add(current.Id))
        {
            throw new InvalidOperationException($"Snapshot parent chain contains a cycle at snapshot {current.Id}");
        }

        restoreChain.Insert(0, current); // Add to front to maintain order

        if (current.Type.Equals("Full", StringComparison.OrdinalIgnoreCase))
            break;

        current = snapshots.FirstOrDefault(s => s.Id == current.ParentSnapshotId);
    }

    return (targetSnapshot, restoreChain);
}
|
||||
|
||||
/// <summary>
/// Applies an incremental snapshot to the target database by piping its SQL
/// into the `mysql` client over stdin.
/// </summary>
/// <param name="incremental">Metadata of the incremental snapshot to apply.</param>
/// <exception cref="FileNotFoundException">The snapshot file is missing.</exception>
/// <exception cref="Exception">mysql could not be started or exited with a non-zero code.</exception>
private async Task ApplyIncrementalSnapshotAsync(SnapshotInfo incremental)
{
    Console.WriteLine($"Applying incremental snapshot {incremental.Id}...");

    if (!File.Exists(incremental.FilePath))
    {
        throw new FileNotFoundException($"Incremental snapshot file not found: {incremental.FilePath}");
    }

    // Decrypt/decompress as needed and obtain the raw SQL text.
    var sqlContent = await ReadSnapshotFileAsync(incremental.FilePath);

    // Connection details come from the configured connection string.
    var connectionString = _config.ConnectionString;
    var server = ExtractValue(connectionString, "Server") ?? "localhost";
    var port = ExtractValue(connectionString, "Port") ?? "3306";
    var database = ExtractValue(connectionString, "Database") ?? "trading_platform";
    var userId = ExtractValue(connectionString, "Uid") ?? "root";
    var password = ExtractValue(connectionString, "Pwd") ?? "";

    // NOTE(review): -p on the command line exposes the password in the process
    // list; consider the MYSQL_PWD environment variable or a defaults file.
    var mysqlArgs = $"-h{server} -P{port} -u{userId}";
    if (!string.IsNullOrEmpty(password))
    {
        mysqlArgs += $" -p{password}";
    }
    mysqlArgs += $" {database}";

    var startInfo = new System.Diagnostics.ProcessStartInfo
    {
        FileName = "mysql",
        Arguments = mysqlArgs,
        RedirectStandardInput = true,
        RedirectStandardOutput = true,
        RedirectStandardError = true,
        UseShellExecute = false,
        CreateNoWindow = true
    };

    using var process = System.Diagnostics.Process.Start(startInfo);
    if (process == null)
    {
        // BUG FIX: a null process used to be ignored, silently skipping the incremental.
        throw new Exception("Failed to start mysql process");
    }

    // BUG FIX: read stdout/stderr concurrently while feeding stdin. Reading
    // them sequentially after the write could deadlock once a pipe buffer
    // filled up, since the child blocks on a full pipe.
    var stdOutTask = process.StandardOutput.ReadToEndAsync();
    var stdErrTask = process.StandardError.ReadToEndAsync();

    await process.StandardInput.WriteAsync(sqlContent);
    await process.StandardInput.FlushAsync();
    process.StandardInput.Close();

    string stdOut = await stdOutTask;
    string stdErr = await stdErrTask;
    await process.WaitForExitAsync();

    if (process.ExitCode != 0)
    {
        Console.WriteLine($"[mysql stdout] {stdOut}");
        Console.WriteLine($"[mysql stderr] {stdErr}");
        throw new Exception($"mysql failed with exit code {process.ExitCode}");
    }
}
|
||||
|
||||
/// <summary>
/// Counts events across every event file (one JSON event per line).
/// Unreadable files are skipped with a console warning.
/// </summary>
private async Task<long> GetTotalEventCountAsync()
{
    long total = 0;

    foreach (var file in Directory.GetFiles(_eventsPath, "events_*.json"))
    {
        try
        {
            var lines = await File.ReadAllLinesAsync(file);
            total += lines.Length;
        }
        catch (Exception ex)
        {
            Console.WriteLine($"Warning: Could not read event file {file}: {ex.Message}");
        }
    }

    return total;
}
|
||||
|
||||
/// <summary>
/// Returns the highest event id found on the last line of any event file,
/// or 0 when no events exist. Unreadable files are skipped with a warning.
/// </summary>
private async Task<long> GetLastEventIdAsync()
{
    long lastId = 0;

    // Newest-named files first; every file is still inspected because ids
    // are compared, never assumed ordered across files.
    foreach (var file in Directory.GetFiles(_eventsPath, "events_*.json").OrderByDescending(f => f))
    {
        try
        {
            var lines = await File.ReadAllLinesAsync(file);
            if (lines.Length == 0)
            {
                continue;
            }

            var lastEvent = JsonSerializer.Deserialize<DatabaseEvent>(lines.Last());
            if (lastEvent != null && lastEvent.Id > lastId)
            {
                lastId = lastEvent.Id;
            }
        }
        catch (Exception ex)
        {
            Console.WriteLine($"Warning: Could not read event file {file}: {ex.Message}");
        }
    }

    return lastId;
}
|
||||
|
||||
/// <summary>
/// Collects all events whose timestamp lies within [fromTimestamp, toTimestamp],
/// ordered by timestamp. Unreadable files are skipped with a console warning.
/// </summary>
/// <param name="fromTimestamp">Inclusive lower bound (Unix seconds).</param>
/// <param name="toTimestamp">Inclusive upper bound (Unix seconds).</param>
private async Task<List<DatabaseEvent>> GetEventsInRangeAsync(long fromTimestamp, long toTimestamp)
{
    var events = new List<DatabaseEvent>();

    foreach (var file in Directory.GetFiles(_eventsPath, "events_*.json"))
    {
        try
        {
            foreach (var line in await File.ReadAllLinesAsync(file))
            {
                var evt = JsonSerializer.Deserialize<DatabaseEvent>(line);
                if (evt != null && evt.Timestamp >= fromTimestamp && evt.Timestamp <= toTimestamp)
                {
                    events.Add(evt);
                }
            }
        }
        catch (Exception ex)
        {
            Console.WriteLine($"Warning: Could not read event file {file}: {ex.Message}");
        }
    }

    return events.OrderBy(e => e.Timestamp).ToList();
}
|
||||
|
||||
/// <summary>
/// Restores the database from a single (full) snapshot file.
/// </summary>
/// <exception cref="FileNotFoundException">The snapshot file is missing.</exception>
private async Task RestoreFromSnapshotAsync(SnapshotInfo snapshot)
{
    Console.WriteLine($"Restoring database from snapshot {snapshot.Id}...");

    if (!File.Exists(snapshot.FilePath))
    {
        throw new FileNotFoundException($"Snapshot file not found: {snapshot.FilePath}");
    }

    // The programmatic path handles encrypted/compressed snapshot formats.
    await RestoreProgrammaticallyAsync(snapshot);
}
|
||||
|
||||
/// <summary>
/// Restores the database by streaming the snapshot's SQL into the `mysql`
/// client over stdin.
/// </summary>
/// <param name="snapshot">Metadata of the snapshot to restore.</param>
/// <exception cref="Exception">mysql could not be started or exited with a non-zero code.</exception>
private async Task RestoreProgrammaticallyAsync(SnapshotInfo snapshot)
{
    // Read and decrypt/decompress the snapshot into SQL text.
    var sqlContent = await ReadSnapshotFileAsync(snapshot.FilePath);

    // BUG FIX: the previous version also wrote the full plaintext dump to a
    // temp file that was never used - wasted I/O and a leak of sensitive SQL
    // onto disk. The content is now only streamed over stdin.

    var connectionString = _config.ConnectionString;
    var server = ExtractValue(connectionString, "Server") ?? "localhost";
    var port = ExtractValue(connectionString, "Port") ?? "3306";
    var database = ExtractValue(connectionString, "Database") ?? "trading_platform";
    var userId = ExtractValue(connectionString, "Uid") ?? "root";
    var password = ExtractValue(connectionString, "Pwd") ?? "";

    // NOTE(review): -p on the command line exposes the password in the process list.
    var mysqlArgs = $"-h{server} -P{port} -u{userId}";
    if (!string.IsNullOrEmpty(password))
    {
        mysqlArgs += $" -p{password}";
    }
    mysqlArgs += $" {database}";

    var startInfo = new System.Diagnostics.ProcessStartInfo
    {
        FileName = "mysql",
        Arguments = mysqlArgs,
        RedirectStandardInput = true,
        RedirectStandardOutput = true,
        RedirectStandardError = true,
        UseShellExecute = false,
        CreateNoWindow = true
    };

    using var process = new System.Diagnostics.Process { StartInfo = startInfo };

    process.ErrorDataReceived += (sender, e) =>
    {
        if (!string.IsNullOrEmpty(e.Data))
        {
            Console.WriteLine($"[mysql restore] {e.Data}");
        }
    };

    if (!process.Start())
    {
        throw new Exception("Failed to start mysql process");
    }
    process.BeginErrorReadLine();

    // BUG FIX: stdout is redirected, so it must be drained; otherwise a chatty
    // restore can fill the pipe buffer and deadlock the child process.
    var stdOutTask = process.StandardOutput.ReadToEndAsync();

    // Send the SQL content to mysql via stdin; disposing closes the pipe so
    // mysql sees end-of-input.
    using (var writer = process.StandardInput)
    {
        await writer.WriteAsync(sqlContent);
        await writer.FlushAsync();
    }

    await stdOutTask;
    await process.WaitForExitAsync();

    if (process.ExitCode != 0)
    {
        throw new Exception($"mysql restore failed with exit code {process.ExitCode}");
    }

    Console.WriteLine("Database restore completed successfully using mysql command");
}
|
||||
|
||||
/// <summary>
/// Reads a snapshot file and returns its SQL text, handling the four storage
/// formats by extension: ".lz4.enc" (compressed then encrypted), ".lz4"
/// (compressed only), ".enc" (encrypted only), or plain text.
/// </summary>
/// <param name="filePath">Path to the snapshot artifact.</param>
/// <exception cref="InvalidOperationException">Wraps any decrypt/decompress/read failure.</exception>
private async Task<string> ReadSnapshotFileAsync(string filePath)
{
    try
    {
        if (filePath.EndsWith(".lz4.enc"))
        {
            // Encrypted + compressed: decrypt first, then decompress.
            var decryptedPath = filePath.Replace(".lz4.enc", ".lz4.tmp");
            var decompressedPath = filePath.Replace(".lz4.enc", ".sql.tmp");
            try
            {
                await _encryptionService.DecryptFileAsync(filePath, decryptedPath);
                await _fileService.DecompressFileStreamingAsync(decryptedPath, decompressedPath);
                var content = await _fileService.ReadFileOptimizedAsync(decompressedPath);
                return Encoding.UTF8.GetString(content);
            }
            finally
            {
                if (File.Exists(decryptedPath)) File.Delete(decryptedPath);
                if (File.Exists(decompressedPath)) File.Delete(decompressedPath);
            }
        }

        if (filePath.EndsWith(".lz4"))
        {
            // Compressed only.
            var tempPath = filePath.Replace(".lz4", ".tmp");
            try
            {
                await _fileService.DecompressFileStreamingAsync(filePath, tempPath);
                var content = await _fileService.ReadFileOptimizedAsync(tempPath);
                return Encoding.UTF8.GetString(content);
            }
            finally
            {
                // BUG FIX: cleanup now runs even if the read throws; the temp
                // file was previously leaked on failure.
                if (File.Exists(tempPath)) File.Delete(tempPath);
            }
        }

        if (filePath.EndsWith(".enc"))
        {
            // Encrypted only.
            var tempPath = filePath.Replace(".enc", ".tmp");
            try
            {
                await _encryptionService.DecryptFileAsync(filePath, tempPath);
                var content = await _fileService.ReadFileOptimizedAsync(tempPath);
                return Encoding.UTF8.GetString(content);
            }
            finally
            {
                // BUG FIX: cleanup now runs even if the read throws.
                if (File.Exists(tempPath)) File.Delete(tempPath);
            }
        }

        // Plain SQL text.
        var plain = await _fileService.ReadFileOptimizedAsync(filePath);
        return Encoding.UTF8.GetString(plain);
    }
    catch (Exception ex)
    {
        throw new InvalidOperationException($"Failed to read snapshot file {filePath}: {ex.Message}", ex);
    }
}
|
||||
|
||||
/// <summary>
/// Sanity-checks the restored database: it must be reachable and contain at
/// least one table in the current schema.
/// </summary>
/// <exception cref="Exception">No tables were found after the restore.</exception>
private async Task ValidateRestoreAsync()
{
    using var connection = new MySqlConnection(_config.ConnectionString);
    await connection.OpenAsync();

    // A zero table count means the restore produced an empty schema.
    using var command = new MySqlCommand(
        "SELECT COUNT(*) FROM information_schema.tables WHERE table_schema = DATABASE()",
        connection);
    var tableCount = await command.ExecuteScalarAsync();

    if (Convert.ToInt32(tableCount) == 0)
    {
        throw new Exception("Database validation failed: No tables found after restore");
    }
}
|
||||
|
||||
/// <summary>
/// Persists a recovery point as pretty-printed JSON, named after the point.
/// </summary>
private async Task SaveRecoveryPointAsync(RecoveryPoint point)
{
    var pointFile = Path.Combine(_recoveryPointsPath, $"{point.Name}.json");
    var json = JsonSerializer.Serialize(point, new JsonSerializerOptions { WriteIndented = true });
    await _fileService.WriteFileOptimizedAsync(pointFile, Encoding.UTF8.GetBytes(json));
}
|
||||
|
||||
/// <summary>
/// Seeds _nextPointId from the recovery points already on disk so new ids
/// never collide with existing ones.
/// </summary>
private void LoadNextPointId()
{
    // BUG FIX: recovery point files are named "{Name}.json" (see
    // SaveRecoveryPointAsync), so the previous int.TryParse-on-filename
    // approach almost never matched and ids restarted at 1 on every launch,
    // producing duplicate ids. Read the stored Id out of each file instead.
    var maxId = 0;

    foreach (var file in Directory.GetFiles(_recoveryPointsPath, "*.json"))
    {
        try
        {
            var point = JsonSerializer.Deserialize<RecoveryPoint>(File.ReadAllText(file));
            if (point != null && point.Id > maxId)
            {
                maxId = point.Id;
            }
        }
        catch (Exception ex)
        {
            Console.WriteLine($"Warning: Could not load recovery point from {file}: {ex.Message}");
        }
    }

    _nextPointId = maxId + 1;
}
|
||||
|
||||
/// <summary>
/// Extracts the value for a key from a semicolon-delimited connection string
/// (case-insensitive key match), or null when the key is absent.
/// </summary>
/// <param name="connectionString">Connection string of "Key=Value;..." pairs.</param>
/// <param name="key">Key to look up, compared ordinal-ignore-case.</param>
private string? ExtractValue(string connectionString, string key)
{
    foreach (var pair in connectionString.Split(';'))
    {
        // BUG FIX: split on the first '=' only, so values that themselves
        // contain '=' (e.g. passwords, base64 keys) are no longer discarded.
        var keyValue = pair.Split('=', 2);
        if (keyValue.Length == 2 && keyValue[0].Trim().Equals(key, StringComparison.OrdinalIgnoreCase))
        {
            return keyValue[1].Trim();
        }
    }
    return null;
}
|
||||
}
|
||||
}
|
||||
633
Services/SnapshotService.cs
Normal file
633
Services/SnapshotService.cs
Normal file
@ -0,0 +1,633 @@
|
||||
using System.ComponentModel.DataAnnotations;
|
||||
using System.IO.Compression;
|
||||
using System.Security.Cryptography;
|
||||
using System.Text;
|
||||
using System.Text.Json;
|
||||
using DatabaseSnapshotsService.Models;
|
||||
using MySqlConnector;
|
||||
using DatabaseSnapshotsService.Services;
|
||||
|
||||
namespace DatabaseSnapshotsService.Services
|
||||
{
|
||||
public class SnapshotService
|
||||
{
|
||||
private readonly SnapshotConfiguration _config;
|
||||
private readonly string _snapshotsPath;
|
||||
private readonly string _metadataPath;
|
||||
private readonly EncryptionService _encryptionService;
|
||||
private readonly OptimizedFileService _fileService;
|
||||
private int _nextId = 1;
|
||||
|
||||
/// <summary>
/// Creates a SnapshotService rooted at the configured snapshot path, wiring
/// encryption and file helpers and ensuring the storage directories exist.
/// </summary>
/// <param name="config">Service configuration (storage paths, security settings).</param>
public SnapshotService(SnapshotConfiguration config)
{
    _config = config;
    _snapshotsPath = Path.GetFullPath(config.SnapshotStorage.Path);
    _metadataPath = Path.Combine(_snapshotsPath, "metadata");

    // Same key/flag wiring as RecoveryService so both services can read each other's files.
    _encryptionService = new EncryptionService(
        config.Security.EncryptionKey,
        config.Security.Encryption
    );

    _fileService = new OptimizedFileService();

    Directory.CreateDirectory(_snapshotsPath);
    Directory.CreateDirectory(_metadataPath);

    // Continue ID numbering from snapshots already on disk.
    LoadNextId();
}
|
||||
|
||||
/// <summary>
/// Validates and sanitizes inputs, creates a full database snapshot, and
/// persists its metadata. Metadata is first saved while the snapshot is in
/// progress so a crash leaves a traceable record; it is re-saved afterwards
/// as Completed (or Failed on error, with the exception rethrown).
/// </summary>
/// <param name="name">Snapshot name; must pass snapshot-name validation.</param>
/// <param name="description">Optional description; falls back to the name.</param>
/// <param name="userId">Optional id of the requesting user.</param>
/// <exception cref="ArgumentException">The name or description fails validation.</exception>
public async Task<SnapshotInfo> CreateSnapshotAsync(string name, string? description = null, int? userId = null)
{
    var nameValidation = InputValidation.SnapshotValidation.ValidateSnapshotName(name);
    if (nameValidation != ValidationResult.Success)
    {
        throw new ArgumentException($"Invalid snapshot name: {nameValidation.ErrorMessage}");
    }

    var sanitizedName = InputValidation.SanitizeString(name);
    var sanitizedDescription = InputValidation.SanitizeString(description ?? string.Empty);

    if (!string.IsNullOrEmpty(sanitizedDescription))
    {
        var descriptionValidation = InputValidation.SnapshotValidation.ValidateSnapshotDescription(sanitizedDescription);
        if (descriptionValidation != ValidationResult.Success)
        {
            throw new ArgumentException($"Invalid snapshot description: {descriptionValidation.ErrorMessage}");
        }
    }

    var snapshot = new SnapshotInfo
    {
        Id = _nextId++,
        Timestamp = DateTimeOffset.UtcNow.ToUnixTimeSeconds(),
        Status = SnapshotStatus.Creating.ToString(),
        // Fall back to the sanitized name when no description was supplied.
        Description = string.IsNullOrEmpty(sanitizedDescription) ? sanitizedName : sanitizedDescription,
        UserId = userId,
        CreatedAt = DateTime.UtcNow
    };

    try
    {
        await CreateFullSnapshotAsync(snapshot);
        await SaveSnapshotMetadataAsync(snapshot);
        snapshot.Status = SnapshotStatus.Completed.ToString();
        await SaveSnapshotMetadataAsync(snapshot);
        return snapshot;
    }
    catch (Exception)
    {
        snapshot.Status = SnapshotStatus.Failed.ToString();
        await SaveSnapshotMetadataAsync(snapshot);
        throw;
    }
}
|
||||
|
||||
/// <summary>
/// Loads snapshot metadata from disk, optionally filtered by type
/// (case-insensitive), newest first, capped at <paramref name="limit"/>.
/// Unreadable files are skipped with a console warning.
/// </summary>
/// <param name="type">Optional snapshot type filter ("Full", "Incremental", ...).</param>
/// <param name="limit">Maximum number of snapshots to return.</param>
public async Task<List<SnapshotInfo>> ListSnapshotsAsync(string? type = null, int limit = 50)
{
    var snapshots = new List<SnapshotInfo>();

    foreach (var file in Directory.GetFiles(_metadataPath, "*.json"))
    {
        try
        {
            var snapshot = JsonSerializer.Deserialize<SnapshotInfo>(await File.ReadAllTextAsync(file));
            var matches = snapshot != null
                && (string.IsNullOrEmpty(type) || snapshot.Type.Equals(type, StringComparison.OrdinalIgnoreCase));
            if (matches)
            {
                snapshots.Add(snapshot!);
            }
        }
        catch (Exception ex)
        {
            Console.WriteLine($"Warning: Could not load snapshot metadata from {file}: {ex.Message}");
        }
    }

    return snapshots.OrderByDescending(s => s.CreatedAt).Take(limit).ToList();
}
|
||||
|
||||
/// <summary>
/// Loads a snapshot's metadata by id, or null when it does not exist or
/// cannot be read (an error is logged in that case).
/// </summary>
/// <param name="id">Snapshot id; metadata lives in "{id}.json".</param>
public async Task<SnapshotInfo?> GetSnapshotAsync(int id)
{
    var metadataFile = Path.Combine(_metadataPath, $"{id}.json");
    if (!File.Exists(metadataFile))
    {
        return null;
    }

    try
    {
        return JsonSerializer.Deserialize<SnapshotInfo>(await File.ReadAllTextAsync(metadataFile));
    }
    catch (Exception ex)
    {
        Console.WriteLine($"Error loading snapshot {id}: {ex.Message}");
        return null;
    }
}
|
||||
|
||||
/// <summary>
/// Deletes a snapshot's data file and its metadata record.
/// </summary>
/// <param name="id">Id of the snapshot to delete.</param>
/// <exception cref="ArgumentException">No snapshot with the given id exists.</exception>
public async Task DeleteSnapshotAsync(int id)
{
    var snapshot = await GetSnapshotAsync(id)
        ?? throw new ArgumentException($"Snapshot {id} not found");

    // Remove the snapshot artifact, if it is still present.
    if (File.Exists(snapshot.FilePath))
    {
        File.Delete(snapshot.FilePath);
    }

    // Remove the metadata record.
    var metadataFile = Path.Combine(_metadataPath, $"{id}.json");
    if (File.Exists(metadataFile))
    {
        File.Delete(metadataFile);
    }
}
|
||||
|
||||
/// <summary>
/// Creates a full database dump via mariadb-dump into snapshot.FilePath,
/// recording binlog coordinates, checksum and size, then optionally
/// compresses and encrypts the file (updating FilePath/DataSize to match).
/// </summary>
/// <param name="snapshot">Snapshot record to populate; Id/Timestamp must already be set.</param>
/// <exception cref="Exception">mariadb-dump exited with a non-zero code.</exception>
private async Task CreateFullSnapshotAsync(SnapshotInfo snapshot)
{
    var fileName = $"snapshot_{snapshot.Id}_{snapshot.Timestamp}.sql";
    var filePath = Path.Combine(_snapshotsPath, fileName);
    snapshot.FilePath = filePath;

    // Record binlog coordinates so incremental restores can resume from here.
    var binlogStatus = await GetCurrentBinlogStatusAsync();
    snapshot.BinlogFile = binlogStatus.File;
    snapshot.BinlogPosition = binlogStatus.Position;
    snapshot.Type = "Full";

    var dumpArgs = BuildDumpArguments();
    await RunDumpToFileAsync(dumpArgs, filePath);

    // Checksum and size refer to the raw SQL dump, before compression/encryption.
    snapshot.Checksum = await CalculateFileChecksumAsync(filePath);
    snapshot.DataSize = new FileInfo(filePath).Length;

    if (_config.SnapshotStorage.Compression)
    {
        await CompressFileAsync(filePath);
        snapshot.FilePath = filePath + ".lz4";
        snapshot.DataSize = new FileInfo(snapshot.FilePath).Length;
    }

    if (_config.Security.Encryption && !string.IsNullOrEmpty(_config.Security.EncryptionKey))
    {
        var originalFilePath = snapshot.FilePath;
        await EncryptFileAsync(snapshot.FilePath, _config.Security.EncryptionKey);
        // EncryptFileAsync produces "<path>.enc"; track the encrypted artifact.
        snapshot.FilePath = originalFilePath + ".enc";
        snapshot.DataSize = new FileInfo(snapshot.FilePath).Length;
    }
}

/// <summary>
/// Builds the mariadb-dump argument string from the configured connection
/// string and dump optimization options.
/// </summary>
private string BuildDumpArguments()
{
    var connectionString = _config.ConnectionString;
    var server = ExtractValue(connectionString, "Server") ?? "localhost";
    var port = ExtractValue(connectionString, "Port") ?? "3306";
    var database = ExtractValue(connectionString, "Database") ?? "trading_platform";
    var dbUserId = ExtractValue(connectionString, "Uid") ?? "root";
    var password = ExtractValue(connectionString, "Pwd") ?? "";

    var args = new StringBuilder($"-h{server} -P{port} -u{dbUserId}");
    if (!string.IsNullOrEmpty(password))
    {
        // NOTE(review): -p exposes the password in the process list; consider MYSQL_PWD.
        args.Append($" -p{password}");
    }

    var o = _config.SnapshotStorage.DumpOptimizations;
    if (o.SingleTransaction) args.Append(" --single-transaction");
    if (o.IncludeRoutines) args.Append(" --routines");
    if (o.IncludeTriggers) args.Append(" --triggers");
    if (o.IncludeEvents) args.Append(" --events");
    if (o.Quick) args.Append(" --quick");
    if (o.OrderByPrimary) args.Append(" --order-by-primary");
    if (o.FlushLogs) args.Append(" --flush-logs");
    if (o.MasterData > 0) args.Append($" --master-data={o.MasterData}");
    if (o.Compact) args.Append(" --compact");
    if (o.NoAutocommit) args.Append(" --no-autocommit");
    args.Append(o.LockTables ? " --lock-tables" : " --skip-lock-tables");
    args.Append(" --add-drop-database");
    args.Append(" --add-drop-table");
    args.Append(" --create-options");
    if (o.ExtendedInsert) args.Append(" --extended-insert");
    if (o.CompleteInsert) args.Append(" --complete-insert");
    if (o.HexBlob) args.Append(" --hex-blob");
    args.Append($" --net_buffer_length={o.NetBufferLength}");
    args.Append($" --max_allowed_packet={o.MaxAllowedPacket}");

    foreach (var table in o.ExcludeTables)
    {
        args.Append($" --ignore-table={database}.{table}");
    }

    if (o.IncludeTables.Any())
    {
        args.Append(" --tables");
        foreach (var table in o.IncludeTables)
        {
            args.Append($" {table}");
        }
        args.Append($" {database}");
    }
    else
    {
        args.Append($" --databases {database}");
    }

    return args.ToString();
}

/// <summary>
/// Runs mariadb-dump with the given arguments, streaming its stdout line by
/// line into <paramref name="outputPath"/> and echoing stderr to the console.
/// </summary>
/// <exception cref="Exception">mariadb-dump exited with a non-zero code.</exception>
private async Task RunDumpToFileAsync(string dumpArgs, string outputPath)
{
    var startInfo = new System.Diagnostics.ProcessStartInfo
    {
        FileName = "mariadb-dump",
        Arguments = dumpArgs,
        RedirectStandardOutput = true,
        RedirectStandardError = true,
        UseShellExecute = false,
        CreateNoWindow = true
    };

    using var process = new System.Diagnostics.Process { StartInfo = startInfo };
    using var outputFile = new StreamWriter(outputPath);

    // BUG FIX: RunContinuationsAsynchronously prevents the awaiting code from
    // being resumed inline on the pipe-reader callback threads (deadlock risk).
    var outputComplete = new TaskCompletionSource<bool>(TaskCreationOptions.RunContinuationsAsynchronously);
    var errorComplete = new TaskCompletionSource<bool>(TaskCreationOptions.RunContinuationsAsynchronously);
    var errorMessages = new List<string>();
    var startTime = DateTime.UtcNow;

    Console.WriteLine($"Starting mariadb-dump with optimized settings...");
    Console.WriteLine($"Command: mariadb-dump {dumpArgs}");

    process.OutputDataReceived += (sender, e) =>
    {
        if (e.Data == null)
        {
            // Null signals end of the stdout stream.
            outputComplete.TrySetResult(true);
            return;
        }

        outputFile.WriteLine(e.Data);
        outputFile.Flush(); // Ensure data is written immediately

        // Report progress for large dumps.
        if (e.Data.StartsWith("-- Dump completed"))
        {
            var duration = DateTime.UtcNow - startTime;
            Console.WriteLine($"Dump completed in {duration.TotalSeconds:F1} seconds");
        }
    };

    process.ErrorDataReceived += (sender, e) =>
    {
        if (e.Data == null)
        {
            errorComplete.TrySetResult(true);
            return;
        }

        errorMessages.Add(e.Data);
        Console.WriteLine($"[mariadb-dump] {e.Data}");
    };

    process.Start();
    process.BeginOutputReadLine();
    process.BeginErrorReadLine();

    // Both streams must drain before the dump file is complete.
    await Task.WhenAll(outputComplete.Task, errorComplete.Task);
    await process.WaitForExitAsync();

    if (process.ExitCode != 0)
    {
        var errorSummary = string.Join("; ", errorMessages.Take(5)); // Show first 5 errors
        throw new Exception($"mariadb-dump failed with exit code {process.ExitCode}. Errors: {errorSummary}");
    }

    var totalDuration = DateTime.UtcNow - startTime;
    Console.WriteLine($"Database dump completed successfully in {totalDuration.TotalSeconds:F1} seconds");
}
|
||||
|
||||
/// <summary>
/// Extracts the value for <paramref name="key"/> from a semicolon-delimited
/// connection string (e.g. "Server=localhost;Port=3306").
/// Key comparison is case-insensitive; surrounding whitespace is trimmed.
/// </summary>
/// <param name="connectionString">The connection string to search.</param>
/// <param name="key">The key whose value should be returned.</param>
/// <returns>The trimmed value, or null when the key is not present.</returns>
private string? ExtractValue(string connectionString, string key)
{
    foreach (var pair in connectionString.Split(';'))
    {
        // Split on the FIRST '=' only, so values that themselves contain '='
        // (passwords, base64 tokens) are preserved instead of being silently
        // skipped — the original Split('=') produced 3+ parts for such pairs
        // and the Length == 2 check rejected them.
        var keyValue = pair.Split('=', 2);
        if (keyValue.Length == 2 && keyValue[0].Trim().Equals(key, StringComparison.OrdinalIgnoreCase))
        {
            return keyValue[1].Trim();
        }
    }
    return null;
}
|
||||
|
||||
/// <summary>
/// Queries the MySQL/MariaDB server for the current binary log file name and
/// write position via <c>SHOW MASTER STATUS</c>.
/// </summary>
/// <returns>The active binlog file name and current position.</returns>
/// <exception cref="InvalidOperationException">
/// Thrown when the server returns no row (e.g. binary logging is disabled).
/// </exception>
private async Task<(string File, long Position)> GetCurrentBinlogStatusAsync()
{
    using var connection = new MySqlConnection(_config.ConnectionString);
    await connection.OpenAsync();

    // NOTE(review): SHOW MASTER STATUS is deprecated on recent MySQL (8.0.22+
    // aliases, removed in 8.4 in favor of SHOW BINARY LOG STATUS) — confirm
    // against the target server version.
    using var command = new MySqlCommand("SHOW MASTER STATUS", connection);
    using var reader = await command.ExecuteReaderAsync();

    if (await reader.ReadAsync())
    {
        var file = reader.GetString("File");
        var position = reader.GetInt64("Position");
        return (file, position);
    }

    // Consistent with the other helpers in this class, surface failures as
    // InvalidOperationException instead of the bare Exception base type
    // (backward compatible for callers catching Exception).
    throw new InvalidOperationException("Could not get current binlog status");
}
|
||||
|
||||
/// <summary>
/// Computes the checksum of a file, selecting a parallel or streaming
/// strategy from the file's size.
/// </summary>
/// <param name="filePath">Path of the file to hash.</param>
/// <returns>The checksum string produced by the underlying file service.</returns>
/// <exception cref="InvalidOperationException">Wraps any failure during hashing.</exception>
private async Task<string> CalculateFileChecksumAsync(string filePath)
{
    try
    {
        var length = new FileInfo(filePath).Length;

        // Large files go through the parallel checksum implementation;
        // everything else takes the streaming path.
        return OptimizedFileService.ShouldUseParallelProcessing(length)
            ? await _fileService.CalculateChecksumParallelAsync(filePath)
            : await _fileService.CalculateChecksumStreamingAsync(filePath);
    }
    catch (Exception ex)
    {
        throw new InvalidOperationException($"Failed to calculate file checksum: {ex.Message}", ex);
    }
}
|
||||
|
||||
/// <summary>
/// Compresses a file to "<c>filePath</c>.lz4" via the optimized file service
/// and deletes the uncompressed original on success.
/// </summary>
/// <param name="filePath">Path of the file to compress.</param>
/// <exception cref="InvalidOperationException">Wraps any failure during compression.</exception>
private async Task CompressFileAsync(string filePath)
{
    try
    {
        var fileInfo = new FileInfo(filePath);
        var compressedPath = filePath + ".lz4";

        // Pick compression settings from the file size.
        // NOTE(review): both branches invoke the same CompressFileStreamingAsync
        // method — the "parallel" branch differs only in omitting the buffer
        // size. If a genuinely parallel compression path exists, confirm this
        // branch should be calling it instead.
        if (OptimizedFileService.ShouldUseParallelProcessing(fileInfo.Length))
        {
            await _fileService.CompressFileStreamingAsync(filePath, compressedPath);
        }
        else
        {
            var bufferSize = OptimizedFileService.GetOptimalBufferSize(fileInfo.Length);
            await _fileService.CompressFileStreamingAsync(filePath, compressedPath, bufferSize);
        }

        // Delete the original uncompressed file; only the .lz4 remains.
        File.Delete(filePath);
    }
    catch (Exception ex)
    {
        throw new InvalidOperationException($"File compression failed: {ex.Message}", ex);
    }
}
|
||||
|
||||
/// <summary>
/// Encrypts a file to "<c>filePath</c>.enc" via the encryption service and
/// deletes the plaintext original. The caller is responsible for updating the
/// snapshot's FilePath/DataSize afterwards.
/// </summary>
/// <param name="filePath">Path of the plaintext file to encrypt.</param>
/// <param name="key">Encryption key. NOTE(review): currently unused — the
/// encryption service is invoked without it, presumably reading the key from
/// its own configuration. Confirm and either pass the key through or remove
/// the parameter.</param>
/// <exception cref="InvalidOperationException">Wraps any failure during encryption.</exception>
private async Task EncryptFileAsync(string filePath, string key)
{
    try
    {
        // Use the encryption service for file encryption
        var encryptedFilePath = filePath + ".enc";
        await _encryptionService.EncryptFileAsync(filePath, encryptedFilePath);

        // Delete the original file and update the path
        File.Delete(filePath);

        // Update the snapshot file path to point to the encrypted file
        // This will be handled by the calling method
    }
    catch (Exception ex)
    {
        throw new InvalidOperationException($"File encryption failed: {ex.Message}", ex);
    }
}
|
||||
|
||||
/// <summary>
/// Persists the snapshot's metadata as indented JSON to
/// "{_metadataPath}/{snapshot.Id}.json" using the optimized file writer.
/// </summary>
/// <param name="snapshot">The snapshot whose metadata is saved.</param>
/// <exception cref="InvalidOperationException">Wraps any serialization or I/O failure.</exception>
private async Task SaveSnapshotMetadataAsync(SnapshotInfo snapshot)
{
    try
    {
        var targetPath = Path.Combine(_metadataPath, $"{snapshot.Id}.json");
        var options = new JsonSerializerOptions { WriteIndented = true };
        var payload = Encoding.UTF8.GetBytes(JsonSerializer.Serialize(snapshot, options));

        // Optimized write path instead of File.WriteAllBytesAsync.
        await _fileService.WriteFileOptimizedAsync(targetPath, payload);
    }
    catch (Exception ex)
    {
        throw new InvalidOperationException($"Failed to save snapshot metadata: {ex.Message}", ex);
    }
}
|
||||
|
||||
/// <summary>
/// Scans the metadata directory for numerically-named "*.json" files and sets
/// <c>_nextId</c> to one past the highest id found. Leaves <c>_nextId</c>
/// unchanged when no numeric metadata files exist.
/// </summary>
private void LoadNextId()
{
    // Materialize the numeric ids first: the original called .Max() directly,
    // which throws InvalidOperationException when the directory contains .json
    // files but none with an integer filename.
    var ids = Directory.GetFiles(_metadataPath, "*.json")
        .Select(Path.GetFileNameWithoutExtension)
        .Where(name => int.TryParse(name, out _))
        .Select(int.Parse)
        .ToList();

    if (ids.Count > 0)
    {
        _nextId = ids.Max() + 1;
    }
}
|
||||
|
||||
/// <summary>
/// Creates an incremental snapshot covering the binlog range from the end of
/// the most recent snapshot to the server's current position. The segment is
/// extracted with the external <c>mysqlbinlog</c> tool, then optionally
/// compressed (.lz4) and encrypted (.enc), and its metadata is persisted.
/// </summary>
/// <param name="name">Fallback description when <paramref name="description"/> is null.</param>
/// <param name="description">Optional human-readable description.</param>
/// <param name="userId">Optional id of the user who requested the snapshot.</param>
/// <returns>The completed snapshot's metadata.</returns>
/// <exception cref="Exception">No usable previous snapshot, or mysqlbinlog failed.</exception>
/// <exception cref="NotImplementedException">The binlog rotated since the last snapshot.</exception>
public async Task<SnapshotInfo> CreateIncrementalSnapshotAsync(string name, string? description = null, int? userId = null)
{
    // Find the last snapshot (full or incremental); incrementals chain off
    // whichever snapshot was created most recently.
    var snapshots = await ListSnapshotsAsync();
    var lastSnapshot = snapshots.OrderByDescending(s => s.CreatedAt).FirstOrDefault();
    if (lastSnapshot == null)
    {
        throw new Exception("No previous snapshot found. Create a full snapshot first.");
    }

    // Determine where this incremental must start reading the binlog.
    string? startBinlogFile;
    long? startBinlogPosition;

    if (lastSnapshot.Type.Equals("Full", StringComparison.OrdinalIgnoreCase))
    {
        // A full snapshot records the binlog coordinates at dump time.
        startBinlogFile = lastSnapshot.BinlogFile;
        startBinlogPosition = lastSnapshot.BinlogPosition;
    }
    else
    {
        // For incremental snapshots, use the end position as the start for the next incremental
        startBinlogFile = lastSnapshot.IncrementalBinlogEndFile;
        startBinlogPosition = lastSnapshot.IncrementalBinlogEndPosition;
    }

    if (string.IsNullOrEmpty(startBinlogFile) || startBinlogPosition == null)
    {
        throw new Exception("No previous snapshot with binlog info found. Create a full snapshot first.");
    }

    // Current server-side binlog coordinates become this snapshot's end range.
    var (endFile, endPos) = await GetCurrentBinlogStatusAsync();

    // Build the metadata record up front; Status starts as Creating and is
    // flipped to Completed after the data file is fully written.
    var snapshot = new SnapshotInfo
    {
        Id = _nextId++,
        Timestamp = DateTimeOffset.UtcNow.ToUnixTimeSeconds(),
        Status = SnapshotStatus.Creating.ToString(),
        Description = description ?? name,
        UserId = userId,
        CreatedAt = DateTime.UtcNow,
        Type = "Incremental",
        ParentSnapshotId = lastSnapshot.Id,
        IncrementalBinlogStartFile = startBinlogFile,
        IncrementalBinlogStartPosition = startBinlogPosition,
        IncrementalBinlogEndFile = endFile,
        IncrementalBinlogEndPosition = endPos
    };

    var fileName = $"inc_{snapshot.Id}_{snapshot.Timestamp}.binlog";
    var filePath = Path.Combine(_snapshotsPath, fileName);
    snapshot.FilePath = filePath;

    // Use mysqlbinlog to extract the binlog segment (no --raw, redirect output to file)
    // Extract connection details from configuration.
    // NOTE(review): keys "Uid"/"Pwd" are assumed here, while other code in this
    // commit builds connection strings with "User ID"/"Password" — confirm
    // which key names the configured connection string actually uses.
    var connectionString = _config.ConnectionString;
    var server = ExtractValue(connectionString, "Server") ?? "localhost";
    var port = ExtractValue(connectionString, "Port") ?? "3306";
    var database = ExtractValue(connectionString, "Database") ?? "trading_platform";  // NOTE(review): unused in this method
    var dbUserId = ExtractValue(connectionString, "Uid") ?? "root";
    var password = ExtractValue(connectionString, "Pwd") ?? "";

    var args = $"--read-from-remote-server --host={server} --port={port} --user={dbUserId}";
    if (!string.IsNullOrEmpty(password))
    {
        // NOTE(review): the password appears in the process argument list,
        // visible via ps/Task Manager — consider MYSQL_PWD or a login path file.
        args += $" --password={password}";
    }
    args += $" --start-position={startBinlogPosition} --stop-position={endPos} {startBinlogFile}";
    if (startBinlogFile != endFile)
    {
        // If binlog rotated, need to handle multiple files (not implemented here for brevity)
        throw new NotImplementedException("Incremental snapshot across multiple binlog files is not yet supported.");
    }

    var startInfo = new System.Diagnostics.ProcessStartInfo
    {
        FileName = "mysqlbinlog",
        Arguments = args,
        RedirectStandardOutput = true,
        RedirectStandardError = true,
        UseShellExecute = false,
        CreateNoWindow = true
    };

    using var process = new System.Diagnostics.Process { StartInfo = startInfo };
    using var outputFile = new StreamWriter(filePath);
    var error = new StringBuilder();
    // stderr is drained asynchronously so the child cannot block on a full pipe.
    process.ErrorDataReceived += (sender, e) => { if (e.Data != null) error.AppendLine(e.Data); };
    process.Start();
    process.BeginErrorReadLine();
    // Write stdout to file
    while (!process.StandardOutput.EndOfStream)
    {
        var line = await process.StandardOutput.ReadLineAsync();
        if (line != null)
            await outputFile.WriteLineAsync(line);
    }
    await process.WaitForExitAsync();
    // Close before hashing/compressing so all buffered output is flushed to disk.
    outputFile.Close();
    if (process.ExitCode != 0)
    {
        throw new Exception($"mysqlbinlog failed: {error}");
    }

    // Calculate checksum and size of the raw binlog segment.
    snapshot.Checksum = await CalculateFileChecksumAsync(filePath);
    snapshot.DataSize = new FileInfo(filePath).Length;

    // Compress if enabled; CompressFileAsync deletes the uncompressed original.
    if (_config.SnapshotStorage.Compression)
    {
        await CompressFileAsync(filePath);
        snapshot.DataSize = new FileInfo(filePath + ".lz4").Length;
        snapshot.FilePath = filePath + ".lz4";
    }

    // Encrypt if enabled; EncryptFileAsync deletes its plaintext input.
    if (_config.Security.Encryption && !string.IsNullOrEmpty(_config.Security.EncryptionKey))
    {
        var originalFilePath = snapshot.FilePath;
        await EncryptFileAsync(snapshot.FilePath, _config.Security.EncryptionKey);
        // Update the file path to point to the encrypted file
        snapshot.FilePath = originalFilePath + ".enc";
        // Update the file size to reflect the encrypted file size
        snapshot.DataSize = new FileInfo(snapshot.FilePath).Length;
    }

    // Persist metadata twice: once while still Creating, then again after
    // flipping the status to Completed, so a crash in between is detectable.
    await SaveSnapshotMetadataAsync(snapshot);
    snapshot.Status = SnapshotStatus.Completed.ToString();
    await SaveSnapshotMetadataAsync(snapshot);
    return snapshot;
}
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user