using System.ComponentModel.DataAnnotations;
using System.IO.Compression;
using System.Security.Cryptography;
using System.Text;
using System.Text.Json;
using DatabaseSnapshotsService.Models;
using MySqlConnector;
using DatabaseSnapshotsService.Services;

namespace DatabaseSnapshotsService.Services
{
    public class SnapshotService
    {
        private readonly SnapshotConfiguration _config;
        private readonly string _snapshotsPath;
        private readonly string _metadataPath;
        private readonly EncryptionService _encryptionService;
        private readonly OptimizedFileService _fileService;
        private int _nextId = 1;

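        /// <summary>
        /// Creates the snapshot and metadata directories, configures the encryption and
        /// optimized file services, and resumes ID numbering from existing metadata.
        /// </summary>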
        public SnapshotService(SnapshotConfiguration config)
        {
            _config = config;
            _snapshotsPath = Path.GetFullPath(config.SnapshotStorage.Path);
            _metadataPath = Path.Combine(_snapshotsPath, "metadata");

            // Initialize encryption service - match RecoveryService pattern
            _encryptionService = new EncryptionService(
                config.Security.EncryptionKey,
                config.Security.Encryption
            );

            _fileService = new OptimizedFileService();

            // Ensure directories exist
            Directory.CreateDirectory(_snapshotsPath);
            Directory.CreateDirectory(_metadataPath);

            // Load next ID from existing snapshots
            LoadNextId();
        }

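        /// <summary>
        /// Validates and sanitizes the inputs, creates a full database snapshot, and
        /// persists its metadata. The status is saved as Creating, then Completed, or
        /// Failed if the dump throws.
        /// </summary>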
        public async Task<SnapshotInfo> CreateSnapshotAsync(string name, string? description = null, int? userId = null)
        {
            // Validate and sanitize inputs
            var nameValidation = InputValidation.SnapshotValidation.ValidateSnapshotName(name);
            if (nameValidation != ValidationResult.Success)
            {
                throw new ArgumentException($"Invalid snapshot name: {nameValidation.ErrorMessage}");
            }

            var sanitizedName = InputValidation.SanitizeString(name);
            var sanitizedDescription = InputValidation.SanitizeString(description ?? string.Empty);

            if (!string.IsNullOrEmpty(sanitizedDescription))
            {
                var descriptionValidation = InputValidation.SnapshotValidation.ValidateSnapshotDescription(sanitizedDescription);
                if (descriptionValidation != ValidationResult.Success)
                {
                    throw new ArgumentException($"Invalid snapshot description: {descriptionValidation.ErrorMessage}");
                }
            }

            var snapshot = new SnapshotInfo
            {
                Id = _nextId++,
                Timestamp = DateTimeOffset.UtcNow.ToUnixTimeSeconds(),
                Status = SnapshotStatus.Creating.ToString(),
                Description = string.IsNullOrEmpty(sanitizedDescription) ? sanitizedName : sanitizedDescription,
                UserId = userId,
                CreatedAt = DateTime.UtcNow
            };

            try
            {
                await CreateFullSnapshotAsync(snapshot);
                await SaveSnapshotMetadataAsync(snapshot);
                snapshot.Status = SnapshotStatus.Completed.ToString();
                await SaveSnapshotMetadataAsync(snapshot);
                return snapshot;
            }
            catch (Exception)
            {
                snapshot.Status = SnapshotStatus.Failed.ToString();
                await SaveSnapshotMetadataAsync(snapshot);
                throw;
            }
        }

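        /// <summary>
        /// Loads snapshot metadata from disk, optionally filtered by type, and returns
        /// the newest entries first, up to the given limit.
        /// </summary>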
        public async Task<List<SnapshotInfo>> ListSnapshotsAsync(string? type = null, int limit = 50)
        {
            var snapshots = new List<SnapshotInfo>();
            var metadataFiles = Directory.GetFiles(_metadataPath, "*.json");

            foreach (var file in metadataFiles)
            {
                try
                {
                    var json = await File.ReadAllTextAsync(file);
                    var snapshot = JsonSerializer.Deserialize<SnapshotInfo>(json);

                    if (snapshot != null && (string.IsNullOrEmpty(type) || snapshot.Type.Equals(type, StringComparison.OrdinalIgnoreCase)))
                    {
                        snapshots.Add(snapshot);
                    }
                }
                catch (Exception ex)
                {
                    Console.WriteLine($"Warning: Could not load snapshot metadata from {file}: {ex.Message}");
                }
            }

            return snapshots.OrderByDescending(s => s.CreatedAt).Take(limit).ToList();
        }

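        /// <summary>
        /// Returns the snapshot with the given ID, or null if its metadata file is
        /// missing or cannot be deserialized.
        /// </summary>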
        public async Task<SnapshotInfo?> GetSnapshotAsync(int id)
        {
            var metadataFile = Path.Combine(_metadataPath, $"{id}.json");

            if (!File.Exists(metadataFile))
                return null;

            try
            {
                var json = await File.ReadAllTextAsync(metadataFile);
                return JsonSerializer.Deserialize<SnapshotInfo>(json);
            }
            catch (Exception ex)
            {
                Console.WriteLine($"Error loading snapshot {id}: {ex.Message}");
                return null;
            }
        }

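        /// <summary>
        /// Deletes the snapshot file and its metadata. Throws if the snapshot does not exist.
        /// </summary>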
        public async Task DeleteSnapshotAsync(int id)
        {
            var snapshot = await GetSnapshotAsync(id);
            if (snapshot == null)
                throw new ArgumentException($"Snapshot {id} not found");

            // Delete snapshot file
            if (File.Exists(snapshot.FilePath))
            {
                File.Delete(snapshot.FilePath);
            }

            // Delete metadata
            var metadataFile = Path.Combine(_metadataPath, $"{id}.json");
            if (File.Exists(metadataFile))
            {
                File.Delete(metadataFile);
            }
        }

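        /// <summary>
        /// Runs mariadb-dump with the configured optimizations to produce a full SQL dump,
        /// records the current binlog position, then checksums and optionally compresses
        /// and encrypts the resulting file.
        /// </summary>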
        private async Task CreateFullSnapshotAsync(SnapshotInfo snapshot)
        {
            var fileName = $"snapshot_{snapshot.Id}_{snapshot.Timestamp}.sql";
            var filePath = Path.Combine(_snapshotsPath, fileName);
            snapshot.FilePath = filePath;

            // Get current binlog status before creating snapshot
            var binlogStatus = await GetCurrentBinlogStatusAsync();
            snapshot.BinlogFile = binlogStatus.File;
            snapshot.BinlogPosition = binlogStatus.Position;
            snapshot.Type = "Full";

            // Extract connection details from configuration
            var connectionString = _config.ConnectionString;
            var server = ExtractValue(connectionString, "Server") ?? "localhost";
            var port = ExtractValue(connectionString, "Port") ?? "3306";
            var database = ExtractValue(connectionString, "Database") ?? "trading_platform";
            var dbUserId = ExtractValue(connectionString, "Uid") ?? "root";
            var password = ExtractValue(connectionString, "Pwd") ?? "";

            // Build mariadb-dump command arguments with optimizations
            var dumpArgs = $"-h{server} -P{port} -u{dbUserId}";
            if (!string.IsNullOrEmpty(password))
            {
                dumpArgs += $" -p{password}";
            }

            // Add performance and consistency optimizations based on configuration
            var optimizations = _config.SnapshotStorage.DumpOptimizations;

            if (optimizations.SingleTransaction)
            {
                dumpArgs += " --single-transaction";
            }
            if (optimizations.IncludeRoutines)
            {
                dumpArgs += " --routines";
            }
            if (optimizations.IncludeTriggers)
            {
                dumpArgs += " --triggers";
            }
            if (optimizations.IncludeEvents)
            {
                dumpArgs += " --events";
            }
            if (optimizations.Quick)
            {
                dumpArgs += " --quick";
            }
            if (optimizations.OrderByPrimary)
            {
                dumpArgs += " --order-by-primary";
            }
            if (optimizations.FlushLogs)
            {
                dumpArgs += " --flush-logs";
            }
            if (optimizations.MasterData > 0)
            {
                dumpArgs += $" --master-data={optimizations.MasterData}";
            }
            if (optimizations.Compact)
            {
                dumpArgs += " --compact";
            }
            if (optimizations.NoAutocommit)
            {
                dumpArgs += " --no-autocommit";
            }
            if (optimizations.LockTables)
            {
                dumpArgs += " --lock-tables";
            }
            else
            {
                dumpArgs += " --skip-lock-tables";
            }
            dumpArgs += " --add-drop-database";
            dumpArgs += " --add-drop-table";
            dumpArgs += " --create-options";
            if (optimizations.ExtendedInsert)
            {
                dumpArgs += " --extended-insert";
            }
            if (optimizations.CompleteInsert)
            {
                dumpArgs += " --complete-insert";
            }
            if (optimizations.HexBlob)
            {
                dumpArgs += " --hex-blob";
            }
            dumpArgs += $" --net_buffer_length={optimizations.NetBufferLength}";
            dumpArgs += $" --max_allowed_packet={optimizations.MaxAllowedPacket}";
            if (optimizations.ExcludeTables.Any())
            {
                foreach (var table in optimizations.ExcludeTables)
                {
                    dumpArgs += $" --ignore-table={database}.{table}";
                }
            }
            if (optimizations.IncludeTables.Any())
            {
                // The database name must come before --tables: mariadb-dump treats
                // every name argument after --tables as a table name.
                dumpArgs += $" {database} --tables";
                foreach (var table in optimizations.IncludeTables)
                {
                    dumpArgs += $" {table}";
                }
            }
            else
            {
                dumpArgs += $" --databases {database}";
            }

            // Use mariadb-dump to create a complete database dump
            var startInfo = new System.Diagnostics.ProcessStartInfo
            {
                FileName = "mariadb-dump",
                Arguments = dumpArgs,
                RedirectStandardOutput = true,
                RedirectStandardError = true,
                UseShellExecute = false,
                CreateNoWindow = true
            };

            using var process = new System.Diagnostics.Process { StartInfo = startInfo };
            using var outputFile = new StreamWriter(filePath);

            var outputComplete = new TaskCompletionSource<bool>();
            var errorComplete = new TaskCompletionSource<bool>();
            var errorMessages = new List<string>();
            var startTime = DateTime.UtcNow;

            Console.WriteLine("Starting mariadb-dump with optimized settings...");
            // Redact the password so it is not echoed to the console log
            var loggedArgs = string.IsNullOrEmpty(password) ? dumpArgs : dumpArgs.Replace($" -p{password}", " -p******");
            Console.WriteLine($"Command: mariadb-dump {loggedArgs}");

            process.OutputDataReceived += (sender, e) =>
            {
                if (e.Data == null)
                {
                    outputComplete.SetResult(true);
                }
                else
                {
                    outputFile.WriteLine(e.Data);
                    outputFile.Flush(); // Ensure data is written immediately

                    // Report progress for large dumps
                    if (e.Data.StartsWith("-- Dump completed"))
                    {
                        var duration = DateTime.UtcNow - startTime;
                        Console.WriteLine($"Dump completed in {duration.TotalSeconds:F1} seconds");
                    }
                }
            };

            process.ErrorDataReceived += (sender, e) =>
            {
                if (e.Data == null)
                {
                    errorComplete.SetResult(true);
                }
                else
                {
                    errorMessages.Add(e.Data);
                    Console.WriteLine($"[mariadb-dump] {e.Data}");
                }
            };

            process.Start();
            process.BeginOutputReadLine();
            process.BeginErrorReadLine();

            // Wait for both output and error streams to complete
            await Task.WhenAll(outputComplete.Task, errorComplete.Task);
            await process.WaitForExitAsync();

            // Close the writer so the dump file is fully flushed and released
            // before it is hashed, compressed, or deleted below
            outputFile.Close();

            if (process.ExitCode != 0)
            {
                var errorSummary = string.Join("; ", errorMessages.Take(5)); // Show first 5 errors
                throw new Exception($"mariadb-dump failed with exit code {process.ExitCode}. Errors: {errorSummary}");
            }

            var totalDuration = DateTime.UtcNow - startTime;
            Console.WriteLine($"Database dump completed successfully in {totalDuration.TotalSeconds:F1} seconds");

            // Calculate checksum
            snapshot.Checksum = await CalculateFileChecksumAsync(filePath);
            snapshot.DataSize = new FileInfo(filePath).Length;

            // Compress if enabled
            if (_config.SnapshotStorage.Compression)
            {
                await CompressFileAsync(filePath);
                snapshot.DataSize = new FileInfo(filePath + ".lz4").Length;
                snapshot.FilePath = filePath + ".lz4";
            }

            // Encrypt if enabled
            if (_config.Security.Encryption && !string.IsNullOrEmpty(_config.Security.EncryptionKey))
            {
                var originalFilePath = snapshot.FilePath;
                await EncryptFileAsync(snapshot.FilePath, _config.Security.EncryptionKey);
                // Update the file path to point to the encrypted file
                snapshot.FilePath = originalFilePath + ".enc";
                // Update the file size to reflect the encrypted file size
                snapshot.DataSize = new FileInfo(snapshot.FilePath).Length;
            }
        }

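        /// <summary>
        /// Extracts a single key's value from a semicolon-delimited connection string,
        /// or returns null if the key is not present.
        /// </summary>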
        private string? ExtractValue(string connectionString, string key)
        {
            var pairs = connectionString.Split(';');
            foreach (var pair in pairs)
            {
                var keyValue = pair.Split('=');
                if (keyValue.Length == 2 && keyValue[0].Trim().Equals(key, StringComparison.OrdinalIgnoreCase))
                {
                    return keyValue[1].Trim();
                }
            }
            return null;
        }

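        /// <summary>
        /// Reads the current binary log file name and position via SHOW MASTER STATUS;
        /// this is the position full snapshots record and incremental snapshots start from.
        /// </summary>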
        private async Task<(string File, long Position)> GetCurrentBinlogStatusAsync()
        {
            using var connection = new MySqlConnection(_config.ConnectionString);
            await connection.OpenAsync();

            using var command = new MySqlCommand("SHOW MASTER STATUS", connection);
            using var reader = await command.ExecuteReaderAsync();

            if (await reader.ReadAsync())
            {
                var file = reader.GetString("File");
                var position = reader.GetInt64("Position");
                return (file, position);
            }

            throw new Exception("Could not get current binlog status");
        }

        private async Task<string> CalculateFileChecksumAsync(string filePath)
        {
            try
            {
                var fileInfo = new FileInfo(filePath);

                // Use optimized checksum calculation based on file size
                if (OptimizedFileService.ShouldUseParallelProcessing(fileInfo.Length))
                {
                    return await _fileService.CalculateChecksumParallelAsync(filePath);
                }
                else
                {
                    return await _fileService.CalculateChecksumStreamingAsync(filePath);
                }
            }
            catch (Exception ex)
            {
                throw new InvalidOperationException($"Failed to calculate file checksum: {ex.Message}", ex);
            }
        }

        private async Task CompressFileAsync(string filePath)
        {
            try
            {
                var fileInfo = new FileInfo(filePath);
                var compressedPath = filePath + ".lz4";

                // Use optimized LZ4 compression based on file size
                if (OptimizedFileService.ShouldUseParallelProcessing(fileInfo.Length))
                {
                    await _fileService.CompressFileStreamingAsync(filePath, compressedPath);
                }
                else
                {
                    var bufferSize = OptimizedFileService.GetOptimalBufferSize(fileInfo.Length);
                    await _fileService.CompressFileStreamingAsync(filePath, compressedPath, bufferSize);
                }

                // Delete the original uncompressed file
                File.Delete(filePath);
            }
            catch (Exception ex)
            {
                throw new InvalidOperationException($"File compression failed: {ex.Message}", ex);
            }
        }

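        /// <summary>
        /// Encrypts the file to a sibling ".enc" file using the configured EncryptionService
        /// and deletes the plaintext original; the caller updates the snapshot's FilePath.
        /// </summary>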
        private async Task EncryptFileAsync(string filePath, string key)
        {
            try
            {
                // Use the encryption service for file encryption
                var encryptedFilePath = filePath + ".enc";
                await _encryptionService.EncryptFileAsync(filePath, encryptedFilePath);

                // Delete the original file and update the path
                File.Delete(filePath);

                // Update the snapshot file path to point to the encrypted file
                // This will be handled by the calling method
            }
            catch (Exception ex)
            {
                throw new InvalidOperationException($"File encryption failed: {ex.Message}", ex);
            }
        }

        private async Task SaveSnapshotMetadataAsync(SnapshotInfo snapshot)
        {
            try
            {
                var metadataFile = Path.Combine(_metadataPath, $"{snapshot.Id}.json");
                var json = JsonSerializer.Serialize(snapshot, new JsonSerializerOptions { WriteIndented = true });
                var jsonBytes = Encoding.UTF8.GetBytes(json);

                // Use optimized file writing
                await _fileService.WriteFileOptimizedAsync(metadataFile, jsonBytes);
            }
            catch (Exception ex)
            {
                throw new InvalidOperationException($"Failed to save snapshot metadata: {ex.Message}", ex);
            }
        }

        private void LoadNextId()
        {
            var metadataFiles = Directory.GetFiles(_metadataPath, "*.json");
            if (metadataFiles.Length > 0)
            {
                var maxId = metadataFiles
                    .Select(f => Path.GetFileNameWithoutExtension(f))
                    .Where(name => int.TryParse(name, out _))
                    .Select(int.Parse)
                    .DefaultIfEmpty(0) // guard against a directory with no numeric file names
                    .Max();
                _nextId = maxId + 1;
            }
        }

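        /// <summary>
        /// Creates an incremental snapshot by extracting the binlog range between the
        /// previous snapshot's recorded position and the current position using
        /// mysqlbinlog, then checksums and optionally compresses and encrypts the result.
        /// </summary>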
        public async Task<SnapshotInfo> CreateIncrementalSnapshotAsync(string name, string? description = null, int? userId = null)
        {
            // Find the last snapshot (full or incremental)
            var snapshots = await ListSnapshotsAsync();
            var lastSnapshot = snapshots.OrderByDescending(s => s.CreatedAt).FirstOrDefault();
            if (lastSnapshot == null)
            {
                throw new Exception("No previous snapshot found. Create a full snapshot first.");
            }

            // Get the binlog position from the last snapshot
            string? startBinlogFile;
            long? startBinlogPosition;

            if (lastSnapshot.Type.Equals("Full", StringComparison.OrdinalIgnoreCase))
            {
                startBinlogFile = lastSnapshot.BinlogFile;
                startBinlogPosition = lastSnapshot.BinlogPosition;
            }
            else
            {
                // For incremental snapshots, use the end position as the start for the next incremental
                startBinlogFile = lastSnapshot.IncrementalBinlogEndFile;
                startBinlogPosition = lastSnapshot.IncrementalBinlogEndPosition;
            }

            if (string.IsNullOrEmpty(startBinlogFile) || startBinlogPosition == null)
            {
                throw new Exception("No previous snapshot with binlog info found. Create a full snapshot first.");
            }

            // Get current binlog status
            var (endFile, endPos) = await GetCurrentBinlogStatusAsync();

            // Prepare file paths
            var snapshot = new SnapshotInfo
            {
                Id = _nextId++,
                Timestamp = DateTimeOffset.UtcNow.ToUnixTimeSeconds(),
                Status = SnapshotStatus.Creating.ToString(),
                Description = description ?? name,
                UserId = userId,
                CreatedAt = DateTime.UtcNow,
                Type = "Incremental",
                ParentSnapshotId = lastSnapshot.Id,
                IncrementalBinlogStartFile = startBinlogFile,
                IncrementalBinlogStartPosition = startBinlogPosition,
                IncrementalBinlogEndFile = endFile,
                IncrementalBinlogEndPosition = endPos
            };

var fileName = $"inc_{snapshot.Id}_{snapshot.Timestamp}.binlog";
|
|
var filePath = Path.Combine(_snapshotsPath, fileName);
|
|
snapshot.FilePath = filePath;
|
|
|
|
// Use mysqlbinlog to extract the binlog segment (no --raw, redirect output to file)
|
|
// Extract connection details from configuration
|
|
var connectionString = _config.ConnectionString;
|
|
var server = ExtractValue(connectionString, "Server") ?? "localhost";
|
|
var port = ExtractValue(connectionString, "Port") ?? "3306";
|
|
var database = ExtractValue(connectionString, "Database") ?? "trading_platform";
|
|
var dbUserId = ExtractValue(connectionString, "Uid") ?? "root";
|
|
var password = ExtractValue(connectionString, "Pwd") ?? "";
|
|
|
|
var args = $"--read-from-remote-server --host={server} --port={port} --user={dbUserId}";
|
|
if (!string.IsNullOrEmpty(password))
|
|
{
|
|
args += $" --password={password}";
|
|
}
|
|
args += $" --start-position={startBinlogPosition} --stop-position={endPos} {startBinlogFile}";
|
|
if (startBinlogFile != endFile)
|
|
{
|
|
// If binlog rotated, need to handle multiple files (not implemented here for brevity)
|
|
throw new NotImplementedException("Incremental snapshot across multiple binlog files is not yet supported.");
|
|
}
|
|
|
|
var startInfo = new System.Diagnostics.ProcessStartInfo
|
|
{
|
|
FileName = "mysqlbinlog",
|
|
Arguments = args,
|
|
RedirectStandardOutput = true,
|
|
RedirectStandardError = true,
|
|
UseShellExecute = false,
|
|
CreateNoWindow = true
|
|
};
|
|
|
|
using var process = new System.Diagnostics.Process { StartInfo = startInfo };
|
|
using var outputFile = new StreamWriter(filePath);
|
|
var error = new StringBuilder();
|
|
process.ErrorDataReceived += (sender, e) => { if (e.Data != null) error.AppendLine(e.Data); };
|
|
process.Start();
|
|
process.BeginErrorReadLine();
|
|
// Write stdout to file
|
|
while (!process.StandardOutput.EndOfStream)
|
|
{
|
|
var line = await process.StandardOutput.ReadLineAsync();
|
|
if (line != null)
|
|
await outputFile.WriteLineAsync(line);
|
|
}
|
|
await process.WaitForExitAsync();
|
|
outputFile.Close();
|
|
if (process.ExitCode != 0)
|
|
{
|
|
throw new Exception($"mysqlbinlog failed: {error}");
|
|
}
|
|
|
|
            // Calculate checksum and size
            snapshot.Checksum = await CalculateFileChecksumAsync(filePath);
            snapshot.DataSize = new FileInfo(filePath).Length;

            // Compress if enabled
            if (_config.SnapshotStorage.Compression)
            {
                await CompressFileAsync(filePath);
                snapshot.DataSize = new FileInfo(filePath + ".lz4").Length;
                snapshot.FilePath = filePath + ".lz4";
            }

            // Encrypt if enabled
            if (_config.Security.Encryption && !string.IsNullOrEmpty(_config.Security.EncryptionKey))
            {
                var originalFilePath = snapshot.FilePath;
                await EncryptFileAsync(snapshot.FilePath, _config.Security.EncryptionKey);
                // Update the file path to point to the encrypted file
                snapshot.FilePath = originalFilePath + ".enc";
                // Update the file size to reflect the encrypted file size
                snapshot.DataSize = new FileInfo(snapshot.FilePath).Length;
            }

            // Save metadata
            await SaveSnapshotMetadataAsync(snapshot);
            snapshot.Status = SnapshotStatus.Completed.ToString();
            await SaveSnapshotMetadataAsync(snapshot);
            return snapshot;
        }
    }
}