PartitionStorage insert after compress

NOT TESTED!!!
pull/4/head
Ogoun 2 years ago
parent 65133c3cec
commit 2bf1605bee

@@ -154,8 +154,8 @@ namespace PartitionFileStorageTest
             sw.Stop();
             Console.WriteLine($"Fill journal: {sw.ElapsedMilliseconds}ms");
             sw.Restart();
-            storeIncoming.CompleteStoreAndRebuild();
-            storeOutcoming.CompleteStoreAndRebuild();
+            storeIncoming.CompleteAddingAndCompress();
+            storeOutcoming.CompleteAddingAndCompress();
             sw.Stop();
             Console.WriteLine($"Rebuild journal to store: {sw.ElapsedMilliseconds}ms");
             sw.Restart();

@@ -44,6 +44,7 @@ namespace ZeroLevel.Network
         /// Maximum size of data packet to transmit (serialized frame size)
         /// </summary>
         private const int DEFAULT_MAX_FRAME_PAYLOAD_SIZE = 1024 * 1024 * 32;
+        public readonly static int MAX_FRAME_PAYLOAD_SIZE;
         /// <summary>
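Review note: the new MAX_FRAME_PAYLOAD_SIZE field is declared here, but its initializer is outside the visible hunk. A minimal sketch of how a readonly static is typically assigned next to a const default; the type name NetworkSettings and the configuration key are stand-ins, not part of this commit:

    // Hypothetical static constructor (not shown in the diff): pick up an
    // override at type load, fall back to the compiled-in default.
    static NetworkSettings()
    {
        var raw = Environment.GetEnvironmentVariable("MAX_FRAME_PAYLOAD_SIZE");
        MAX_FRAME_PAYLOAD_SIZE = int.TryParse(raw, out var size) && size > 0
            ? size
            : DEFAULT_MAX_FRAME_PAYLOAD_SIZE;
    }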

@@ -1,4 +1,6 @@
-using System.Threading.Tasks;
+using System;
+using System.Collections.Generic;
+using System.Threading.Tasks;
 namespace ZeroLevel.Services.PartitionStorage
 {
@@ -13,6 +15,8 @@ namespace ZeroLevel.Services.PartitionStorage
     {
         IStorePartitionAccessor<TKey, TInput, TValue> CreateAccessor(TMeta info);
+        IStorePartitionAccessor<TKey, TInput, TValue> CreateMergeAccessor(TMeta info, Func<TValue, IEnumerable<TInput>> decompressor);
+        Task<StoreSearchResult<TKey, TValue, TMeta>> Search(StoreSearchRequest<TKey, TMeta> searchRequest);
     }
 }
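Review note: the store interface now exposes two accessor factories. Per the implementation later in this commit, CreateAccessor opens a partition for a first fill, while CreateMergeAccessor wraps it so new records can be folded into an already-compressed partition. A sketch of the intended split; store, meta, key, and Decompress are illustrative placeholders:

    // First fill of a partition.
    using (var a = store.CreateAccessor(meta))
    {
        a.Store(key, input);
        a.CompleteAddingAndCompress();
    }
    // Later appends to the same partition: the decompressor unpacks an
    // existing TValue back into TInput items so old and new can be re-merged.
    using (var m = store.CreateMergeAccessor(meta, v => Decompress(v)))
    {
        m.Store(key, newerInput);
        m.CompleteAddingAndCompress();
    }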

@@ -1,6 +1,7 @@
 using System;
 using System.Collections.Generic;
 using System.IO;
+using System.Linq;
 using ZeroLevel.Services.FileSystem;
 namespace ZeroLevel.Services.PartitionStorage
@@ -67,5 +68,26 @@ namespace ZeroLevel.Services.PartitionStorage
             }
             return path;
         }
+        public IStoreOptions<TKey, TInput, TValue, TMeta> Clone()
+        {
+            var options = new IStoreOptions<TKey, TInput, TValue, TMeta>
+            {
+                Index = new IndexOptions
+                {
+                    Enabled = this.Index.Enabled,
+                    FileIndexCount = this.Index.FileIndexCount
+                },
+                FilePartition = this.FilePartition,
+                KeyComparer = this.KeyComparer,
+                MaxDegreeOfParallelism = this.MaxDegreeOfParallelism,
+                MergeFunction = this.MergeFunction,
+                Partitions = this.Partitions
+                    .Select(p => new StoreCatalogPartition<TMeta>(p.Name, p.PathExtractor))
+                    .ToList(),
+                RootFolder = this.RootFolder
+            };
+            return options;
+        }
     }
 }
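Review note: Clone exists so a caller can re-target a copy of the options without mutating the shared instance; the Partitions list is rebuilt element by element so the copy owns its own list. The merge accessor added below relies on exactly this to point a second accessor at a scratch folder (the path here is illustrative):

    var tempOptions = options.Clone();
    tempOptions.RootFolder = "/tmp/merge-scratch"; // original options keep serving the live catalog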

@@ -14,14 +14,15 @@ namespace ZeroLevel.Services.PartitionStorage
     {
         string GetCatalogPath();
         /// <summary>
-        /// Save one record
+        /// Has any files
         /// </summary>
-        void Store(TKey key, TInput value);
+        int CountDataFiles();
         /// <summary>
-        /// Complete the recording and perform the conversion of the records from
-        /// (TKey; TInput) to (TKey; TValue)
+        /// Remove all files
         /// </summary>
-        void CompleteStoreAndRebuild();
+        void DropData();
+        #region API !only after data compression!
         /// <summary>
         /// Rebuild indexes
         /// </summary>
@@ -36,13 +37,18 @@ namespace ZeroLevel.Services.PartitionStorage
         IEnumerable<StorePartitionKeyValueSearchResult<TKey, TValue>> Find(IEnumerable<TKey> keys);
         IEnumerable<StorePartitionKeyValueSearchResult<TKey, TValue>> Iterate();
         IEnumerable<StorePartitionKeyValueSearchResult<TKey, TValue>> IterateKeyBacket(TKey key);
+        #endregion
+        #region API !only before data compression!
         /// <summary>
-        /// Has any files
+        /// Save one record
         /// </summary>
-        int CountDataFiles();
+        void Store(TKey key, TInput value);
         /// <summary>
-        /// Remove all files
+        /// Complete the recording and perform the conversion of the records from
+        /// (TKey; TInput) to (TKey; TValue)
         /// </summary>
-        void DropData();
+        void CompleteAddingAndCompress();
+        #endregion
     }
 }
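Review note: the regions encode a two-phase lifecycle, matching the rename to CompleteAddingAndCompress: members in the second region are legal only before compression, members in the first only after. A minimal sketch of the intended order, assuming a ulong key/input instantiation (store and meta are placeholders):

    using (var accessor = store.CreateAccessor(meta))
    {
        // Phase 1 (before compression): append raw (TKey, TInput) pairs.
        accessor.Store(42UL, 100500UL);
        accessor.Store(42UL, 100501UL);
        // Boundary: close write streams, fold each key's inputs into one TValue.
        accessor.CompleteAddingAndCompress();
        // Phase 2 (after compression): index, then query.
        accessor.RebuildIndex();
        var hit = accessor.Find(42UL);
    }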

@@ -26,6 +26,12 @@ namespace ZeroLevel.Services.PartitionStorage
             return new StorePartitionAccessor<TKey, TInput, TValue, TMeta>(_options, info);
         }
+        public IStorePartitionAccessor<TKey, TInput, TValue> CreateMergeAccessor(TMeta info
+            , Func<TValue, IEnumerable<TInput>> decompressor)
+        {
+            return new StoreMergePartitionAccessor<TKey, TInput, TValue, TMeta>(_options, info, decompressor);
+        }
         public async Task<StoreSearchResult<TKey, TValue, TMeta>> Search(StoreSearchRequest<TKey, TMeta> searchRequest)
         {
             var result = new StoreSearchResult<TKey, TValue, TMeta>();

@@ -0,0 +1,127 @@
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using ZeroLevel.Services.Serialization;
namespace ZeroLevel.Services.PartitionStorage
{
    /// <summary>
    /// For writing new values in exist partition
    ///
    /// ORDER: Store -> CompleteAddingAndCompress -> RebuildIndex
    ///
    /// </summary>
    public class StoreMergePartitionAccessor<TKey, TInput, TValue, TMeta>
        : IStorePartitionAccessor<TKey, TInput, TValue>
    {
        private readonly Func<TValue, IEnumerable<TInput>> _decompress;
        /// <summary>
        /// Exists compressed catalog
        /// </summary>
        private readonly IStorePartitionAccessor<TKey, TInput, TValue> _accessor;
        /// <summary>
        /// Write catalog
        /// </summary>
        private readonly IStorePartitionAccessor<TKey, TInput, TValue> _temporaryAccessor;
        public StoreMergePartitionAccessor(IStoreOptions<TKey, TInput, TValue, TMeta> options,
            TMeta info, Func<TValue, IEnumerable<TInput>> decompress)
        {
            if (decompress == null) throw new ArgumentNullException(nameof(decompress));
            _decompress = decompress;
            _accessor = new StorePartitionAccessor<TKey, TInput, TValue, TMeta>(options, info);
            var tempCatalog = Path.Combine(_accessor.GetCatalogPath(), Guid.NewGuid().ToString());
            var tempOptions = options.Clone();
            tempOptions.RootFolder = tempCatalog;
            _temporaryAccessor = new StorePartitionAccessor<TKey, TInput, TValue, TMeta>(tempOptions, info);
        }
        private IEnumerable<StorePartitionKeyValueSearchResult<TKey, IEnumerable<TInput>>>
            IterateReadKeyInputs(string filePath)
        {
            if (File.Exists(filePath))
            {
                var stream = new FileStream(filePath, FileMode.Open, FileAccess.Read, FileShare.ReadWrite, 4096 * 1024);
                using (var reader = new MemoryStreamReader(stream))
                {
                    while (reader.EOS == false)
                    {
                        var k = reader.ReadCompatible<TKey>();
                        var v = reader.ReadCompatible<TValue>();
                        var input = _decompress(v);
                        yield return
                            new StorePartitionKeyValueSearchResult<TKey, IEnumerable<TInput>>
                            {
                                Key = k,
                                Value = input,
                                Found = true
                            };
                    }
                }
            }
        }
        public void CompleteAddingAndCompress()
        {
            var newFiles = Directory.GetFiles(_temporaryAccessor.GetCatalogPath());
            if (newFiles != null && newFiles.Length > 1)
            {
                var folder = _accessor.GetCatalogPath();
                var existsFiles = Directory.GetFiles(folder)
                    ?.ToDictionary(f => Path.GetFileName(f), f => f);
                foreach (var file in newFiles)
                {
                    var name = Path.GetFileName(file);
                    // if datafile by key exists
                    if (existsFiles.ContainsKey(name))
                    {
                        // append all records from existing file to new
                        foreach (var r in IterateReadKeyInputs(existsFiles[name]))
                        {
                            foreach (var i in r.Value)
                            {
                                _temporaryAccessor.Store(r.Key, i);
                            }
                        }
                    }
                    // compress new file
                    (_temporaryAccessor as StorePartitionAccessor<TKey, TInput, TValue, TMeta>)
                        .CompressFile(file);
                    // replace old file by new
                    File.Move(file, Path.Combine(folder, name), true);
                }
            }
            // remove temporary files
            _temporaryAccessor.DropData();
            Directory.Delete(_temporaryAccessor.GetCatalogPath(), true);
        }
        public StorePartitionKeyValueSearchResult<TKey, TValue> Find(TKey key)
            => _accessor.Find(key);
        public IEnumerable<StorePartitionKeyValueSearchResult<TKey, TValue>> Find(IEnumerable<TKey> keys)
            => _accessor.Find(keys);
        public IEnumerable<StorePartitionKeyValueSearchResult<TKey, TValue>> Iterate()
            => _accessor.Iterate();
        public IEnumerable<StorePartitionKeyValueSearchResult<TKey, TValue>> IterateKeyBacket(TKey key)
            => _accessor.IterateKeyBacket(key);
        /// <summary>
        /// Deletes only new entries. Existing entries remain unchanged.
        /// </summary>
        public void DropData() => _temporaryAccessor.DropData();
        public string GetCatalogPath() => _accessor.GetCatalogPath();
        public void RebuildIndex() => _accessor.RebuildIndex();
        public void Store(TKey key, TInput value) => _temporaryAccessor.Store(key, value);
        public int CountDataFiles() => Math.Max(_accessor.CountDataFiles(),
            _temporaryAccessor.CountDataFiles());
        public void Dispose()
        {
            _accessor.Dispose();
            _temporaryAccessor.Dispose();
        }
    }
}
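Review note: the mechanism is append-to-temp, then reconcile. Store writes into a hidden per-merge catalog; CompleteAddingAndCompress re-reads any colliding existing file through the decompressor, appends its records to the temporary file, compresses it, and moves the result over the original. Given the "NOT TESTED" warning, one condition looks suspect: with exactly one new data file the merge body is skipped, yet the temporary catalog is still dropped. An illustration of that branch (file name is illustrative):

    var newFiles = new[] { "cache_000" };        // a single file was written
    if (newFiles != null && newFiles.Length > 1) // false: nothing compressed or moved
    {
        /* merge + compress + move */
    }
    // DropData + Directory.Delete then discard that file's records;
    // the check was likely meant to be newFiles.Length > 0.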

@@ -30,11 +30,12 @@ namespace ZeroLevel.Services.PartitionStorage
                 Directory.CreateDirectory(_catalog);
             }
         }
-        #region API
-        public string GetCatalogPath()
-        {
-            return _catalog;
-        }
+        public int CountDataFiles() => Directory.GetFiles(_catalog)?.Length ?? 0;
+        public string GetCatalogPath() => _catalog;
+        public void DropData() => FSUtils.CleanAndTestFolder(_catalog);
+        #region API !only after data compression!
         public StorePartitionKeyValueSearchResult<TKey, TValue> Find(TKey key)
         {
             var fileName = _options.GetFileName(key, _info);
@@ -92,39 +93,6 @@ namespace ZeroLevel.Services.PartitionStorage
                 }
             }
         }
-        public void CompleteStoreAndRebuild()
-        {
-            // Close all write streams
-            foreach (var s in _writeStreams)
-            {
-                try
-                {
-                    s.Value.Dispose();
-                }
-                catch { }
-            }
-            var files = Directory.GetFiles(_catalog);
-            if (files != null && files.Length > 1)
-            {
-                Parallel.ForEach(files, file => CompressFile(file));
-            }
-        }
-        public void Store(TKey key, TInput value)
-        {
-            var fileName = _options.GetFileName(key, _info);
-            var stream = GetWriteStream(fileName);
-            stream.SerializeCompatible(key);
-            stream.SerializeCompatible(value);
-        }
-        public int CountDataFiles()
-        {
-            var files = Directory.GetFiles(_catalog);
-            return files?.Length ?? 0;
-        }
-        public void DropData()
-        {
-            FSUtils.CleanAndTestFolder(_catalog);
-        }
         public IEnumerable<StorePartitionKeyValueSearchResult<TKey, TValue>> Iterate()
         {
             var files = Directory.GetFiles(_catalog);
@@ -205,6 +173,34 @@ namespace ZeroLevel.Services.PartitionStorage
             }
         }
         #endregion
+        #region API !only before data compression!
+        public void Store(TKey key, TInput value)
+        {
+            var fileName = _options.GetFileName(key, _info);
+            var stream = GetWriteStream(fileName);
+            stream.SerializeCompatible(key);
+            stream.SerializeCompatible(value);
+        }
+        public void CompleteAddingAndCompress()
+        {
+            // Close all write streams
+            foreach (var s in _writeStreams)
+            {
+                try
+                {
+                    s.Value.Dispose();
+                }
+                catch { }
+            }
+            var files = Directory.GetFiles(_catalog);
+            if (files != null && files.Length > 1)
+            {
+                Parallel.ForEach(files, file => CompressFile(file));
+            }
+        }
+        #endregion
         #region Private methods
         private IEnumerable<StorePartitionKeyValueSearchResult<TKey, TValue>> Find(string fileName,
             TKey[] keys)
@@ -282,7 +278,7 @@ namespace ZeroLevel.Services.PartitionStorage
                 }
             }
         }
-        private void CompressFile(string file)
+        internal void CompressFile(string file)
        {
             var dict = new Dictionary<TKey, HashSet<TInput>>();
             using (var reader = new MemoryStreamReader(new FileStream(file, FileMode.Open, FileAccess.Read, FileShare.None, 4096 * 1024)))

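Review note: relaxing CompressFile from private to internal is what lets StoreMergePartitionAccessor invoke compression on its temporary files directly. The method body is truncated above; conceptually it groups the appended inputs per key and folds each group into a single value. An in-memory paraphrase of that compaction, assuming ulong keys/inputs and a byte[] value (the real code streams the file via MemoryStreamReader and uses the options' MergeFunction):

    static Dictionary<ulong, byte[]> Compress(
        IEnumerable<(ulong Key, ulong Input)> records,
        Func<IEnumerable<ulong>, byte[]> mergeFunction)
    {
        var groups = new Dictionary<ulong, HashSet<ulong>>();
        foreach (var (key, input) in records)
        {
            if (!groups.TryGetValue(key, out var set))
                groups[key] = set = new HashSet<ulong>();
            set.Add(input); // duplicates collapse, matching the HashSet in the diff
        }
        // One record per key remains after compaction.
        return groups.ToDictionary(g => g.Key, g => mergeFunction(g.Value));
    }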