Branch: pull/4/head
Author: Ogoun · 1 year ago
Parent: 7d1f8f228b
Commit: e775f1387c

@@ -141,10 +141,10 @@ namespace PartitionFileStorageTest
     var val = pairs[i].Item2;
     if (testData.ContainsKey(key) == false) testData[key] = new HashSet<ulong>();
     testData[key].Add(val);
-    merger.Store(key, val);
+    await merger.Store(key, val);
 }
 Log.Info($"Merge journal filled: {sw.ElapsedMilliseconds}ms. New records merged: {merger.TotalRecords}");
-merger.Compress(); // auto reindex
+await merger.Compress(); // auto reindex
 sw.Stop();
 Log.Info($"Compress after merge: {sw.ElapsedMilliseconds}ms");
@@ -319,7 +319,7 @@ namespace PartitionFileStorageTest
     }
     Log.Info($"Merge journal filled: {sw.ElapsedMilliseconds}ms. Total data count: {PAIRS_COUNT}. Unique keys: {Keys.Count}");
-    merger.Compress(); // auto reindex
+    await merger.Compress(); // auto reindex
 }
 sw.Stop();
 Log.Info($"Compress after merge: {sw.ElapsedMilliseconds}ms");
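These two hunks migrate the merge test from synchronous Store/Compress calls to awaited ones, which means the enclosing test method itself must become async. A minimal sketch of the adjusted shape, where the method name, argument, and CreateMergeAccessor factory are hypothetical stand-ins, not names from this repository:

    // Hedged sketch: the test body once Store/Compress return Task.
    private static async Task RunMergeTest(IStore store)
    {
        var merger = store.CreateMergeAccessor();  // hypothetical factory
        await merger.Store(1UL, 42UL);             // was: merger.Store(...)
        await merger.Compress();                   // was: merger.Compress(); still auto-reindexes
    }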

@@ -1,7 +1,5 @@
-using System;
 using System.Collections.Concurrent;
 using System.Net;
-using System.Threading;
 using ZeroLevel;
 using ZeroLevel.Network;
 using ZeroLevel.Services.Applications;

@@ -163,7 +163,7 @@ namespace ZeroLevel.Collections
 /// </summary>
 /// <param name="comparer">The <see cref="IEqualityComparer{T}"/>
 /// implementation to use when comparing items.</param>
-public ConcurrentHashSet(IEqualityComparer<T>? comparer)
+public ConcurrentHashSet(IEqualityComparer<T> comparer)
     : this(DefaultConcurrencyLevel, DefaultCapacity, true, comparer)
 {
 }
@@ -184,7 +184,7 @@ namespace ZeroLevel.Collections
 /// <exception cref="ArgumentNullException"><paramref name="collection"/> is a null reference
 /// (Nothing in Visual Basic).
 /// </exception>
-public ConcurrentHashSet(IEnumerable<T> collection, IEqualityComparer<T>? comparer)
+public ConcurrentHashSet(IEnumerable<T> collection, IEqualityComparer<T> comparer)
     : this(comparer)
 {
     if (collection == null) throw new ArgumentNullException(nameof(collection));
@@ -211,7 +211,7 @@ namespace ZeroLevel.Collections
 /// <exception cref="ArgumentOutOfRangeException">
 /// <paramref name="concurrencyLevel"/> is less than 1.
 /// </exception>
-public ConcurrentHashSet(int concurrencyLevel, IEnumerable<T> collection, IEqualityComparer<T>? comparer)
+public ConcurrentHashSet(int concurrencyLevel, IEnumerable<T> collection, IEqualityComparer<T> comparer)
     : this(concurrencyLevel, DefaultCapacity, false, comparer)
 {
     if (collection == null) throw new ArgumentNullException(nameof(collection));
@@ -235,12 +235,12 @@ namespace ZeroLevel.Collections
 /// <paramref name="concurrencyLevel"/> is less than 1. -or-
 /// <paramref name="capacity"/> is less than 0.
 /// </exception>
-public ConcurrentHashSet(int concurrencyLevel, int capacity, IEqualityComparer<T>? comparer)
+public ConcurrentHashSet(int concurrencyLevel, int capacity, IEqualityComparer<T> comparer)
     : this(concurrencyLevel, capacity, false, comparer)
 {
 }
-private ConcurrentHashSet(int concurrencyLevel, int capacity, bool growLockArray, IEqualityComparer<T>? comparer)
+private ConcurrentHashSet(int concurrencyLevel, int capacity, bool growLockArray, IEqualityComparer<T> comparer)
 {
     if (concurrencyLevel < 1) throw new ArgumentOutOfRangeException(nameof(concurrencyLevel));
     if (capacity < 0) throw new ArgumentOutOfRangeException(nameof(capacity));
@@ -375,7 +375,7 @@ namespace ZeroLevel.Collections
         continue;
     }
-    Node? previous = null;
+    Node previous = null;
     for (var current = tables.Buckets[bucketNo]; current != null; current = current.Next)
     {
         Debug.Assert((previous == null && current == tables.Buckets[bucketNo]) || previous!.Next == current);
@@ -439,8 +439,8 @@ namespace ZeroLevel.Collections
 private readonly ConcurrentHashSet<T> _set;
-private Node?[]? _buckets;
-private Node? _node;
+private Node[] _buckets;
+private Node _node;
 private int _i;
 private int _state;
@@ -468,7 +468,7 @@ namespace ZeroLevel.Collections
 /// <value>The element in the collection at the current position of the enumerator.</value>
 public T Current { get; private set; }
-object? IEnumerator.Current => Current;
+object IEnumerator.Current => Current;
 /// <summary>
 /// Sets the enumerator to its initial position, which is before the first element in the collection.
@@ -501,7 +501,7 @@ namespace ZeroLevel.Collections
         goto case StateOuterloop;
     case StateOuterloop:
-        Node?[]? buckets = _buckets;
+        Node[] buckets = _buckets;
         Debug.Assert(buckets != null);
         int i = ++_i;
@@ -516,7 +516,7 @@ namespace ZeroLevel.Collections
         goto default;
     case StateInnerLoop:
-        Node? node = _node;
+        Node node = _node;
         if (node != null)
         {
             Current = node.Item;
@@ -606,7 +606,7 @@ namespace ZeroLevel.Collections
 }
 // Try to find this item in the bucket
-Node? previous = null;
+Node previous = null;
 for (var current = tables.Buckets[bucketNo]; current != null; current = current.Next)
 {
     Debug.Assert(previous == null && current == tables.Buckets[bucketNo] || previous!.Next == current);
@@ -878,12 +878,12 @@ namespace ZeroLevel.Collections
 private class Tables
 {
-    public readonly Node?[] Buckets;
+    public readonly Node[] Buckets;
     public readonly object[] Locks;
     public readonly int[] CountPerLock;
-    public Tables(Node?[] buckets, object[] locks, int[] countPerLock)
+    public Tables(Node[] buckets, object[] locks, int[] countPerLock)
     {
         Buckets = buckets;
         Locks = locks;
@@ -896,9 +896,9 @@ namespace ZeroLevel.Collections
 public readonly T Item;
 public readonly int Hashcode;
-public volatile Node? Next;
+public volatile Node Next;
-public Node(T item, int hashcode, Node? next)
+public Node(T item, int hashcode, Node next)
 {
     Item = item;
     Hashcode = hashcode;
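All of the ConcurrentHashSet hunks above make one mechanical change: the C# nullable reference annotations (Node?, Node?[]?, object?, T?) are stripped, presumably so the file compiles cleanly under the project's non-nullable language settings; runtime behavior is unchanged. A minimal usage sketch of the collection, assuming the Add/Contains/Count surface this ConcurrentDictionary-style port exposes:

    // Hedged usage sketch: thread-safe adds, duplicates silently ignored.
    var set = new ConcurrentHashSet<string>(StringComparer.OrdinalIgnoreCase);
    Parallel.For(0, 1_000, i => set.Add($"item-{i % 100}"));
    Console.WriteLine(set.Count);              // 100 distinct items survive
    Console.WriteLine(set.Contains("ITEM-7")); // true: case-insensitive comparer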

@@ -12,7 +12,7 @@ namespace ZeroLevel.Services.Config.Implementation
 while (enumerator.MoveNext())
 {
     string key = (string)enumerator.Entry.Key;
-    string value = ((string?)enumerator.Entry.Value) ?? string.Empty;
+    string value = ((string)enumerator.Entry.Value) ?? string.Empty;
     result.Append(key, value);
 }
 return result;

@@ -16,10 +16,10 @@ namespace MemoryPools
 private protected readonly ObjectWrapper[] _items;
 private protected readonly IPooledObjectPolicy<T> _policy;
 private protected readonly bool _isDefaultPolicy;
-private protected T? _firstItem;
+private protected T _firstItem;
 // This class was introduced in 2.1 to avoid the interface call where possible
-private protected readonly PooledObjectPolicy<T>? _fastPolicy;
+private protected readonly PooledObjectPolicy<T> _fastPolicy;
 /// <summary>
 /// Creates an instance of <see cref="DefaultObjectPool{T}"/>.
@@ -95,7 +95,7 @@ namespace MemoryPools
 private protected struct ObjectWrapper
 {
-    public T? Element;
+    public T Element;
 }
 }
 }

@@ -81,9 +81,9 @@ namespace MemoryPools
     }
 }
-private static void DisposeItem(T? item)
+private static void DisposeItem(T item)
 {
-    if (item is IDisposable disposable)
+    if (item != null && item is IDisposable disposable)
     {
         disposable.Dispose();
     }

@@ -23,7 +23,7 @@ namespace MemoryPools
 public static class ObjectPool
 {
     /// <inheritdoc />
-    public static ObjectPool<T> Create<T>(IPooledObjectPolicy<T>? policy = null) where T : class, new()
+    public static ObjectPool<T> Create<T>(IPooledObjectPolicy<T> policy = null) where T : class, new()
     {
         var provider = new DefaultObjectPoolProvider();
         return provider.Create(policy ?? new DefaultPooledObjectPolicy<T>());
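Here, too, only the `?` on the optional parameter is dropped; passing null still falls back to DefaultPooledObjectPolicy&lt;T&gt;. A minimal usage sketch, assuming this MemoryPools port keeps the Get/Return surface of the Microsoft.Extensions.ObjectPool API it mirrors:

    // Hedged sketch: pool StringBuilders with the default policy (policy == null).
    var pool = ObjectPool.Create<StringBuilder>();
    var sb = pool.Get();
    try
    {
        sb.Append("pooled work");
    }
    finally
    {
        pool.Return(sb); // the instance may be recycled by the next Get()
    }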

@@ -41,10 +41,9 @@ namespace ZeroLevel.Services.PartitionStorage
 var files = Directory.GetFiles(_dataCatalog);
 if (files != null && files.Length > 0)
 {
     foreach (var file in files)
     {
-        RebuildFileIndex(Path.GetFileName(file));
+        await RebuildFileIndex(Path.GetFileName(file));
     }
 }
 }
@@ -121,7 +120,7 @@ namespace ZeroLevel.Services.PartitionStorage
 for (int i = 0; i < _stepValue; i++)
 {
     var pair = d_arr[i * step];
-    writer.WriteCompatible(pair.Key);
+    await Serializer.KeySerializer.Invoke(writer, pair.Key);
     writer.WriteLong(pair.Value);
 }
 }
@@ -162,7 +161,7 @@ namespace ZeroLevel.Services.PartitionStorage
 await Serializer.ValueDeserializer.Invoke(reader);
 if (counter == 0)
 {
-    writer.WriteCompatible(k);
+    await Serializer.KeySerializer.Invoke(writer, k.Value);
     writer.WriteLong(pos);
     counter = _stepValue;
 }
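Both index hunks swap the raw writer.WriteCompatible key dump for the store's own KeySerializer, so index files and data files now share one key encoding (the k.Value access suggests the deserialized key arrives wrapped, e.g. in an option-style type). The index itself stays sparse: every _stepValue-th record's key plus its file offset. A sketch of how a reader would use such an index, under the assumption that keys are written in sorted order; the helper name is hypothetical:

    // Hypothetical lookup helper for a sparse (key, offset) index:
    // find the last indexed key <= target, then scan the data file from there.
    static long FindStartOffset<TKey>(List<(TKey Key, long Offset)> index,
                                      TKey target, IComparer<TKey> cmp)
    {
        long offset = 0; // target precedes the first indexed entry: scan from file start
        foreach (var (key, off) in index)
        {
            if (cmp.Compare(key, target) > 0) break;
            offset = off;
        }
        return offset; // at most _stepValue records remain to scan sequentially
    }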

@@ -15,6 +15,7 @@ namespace ZeroLevel.Services.PartitionStorage
 /// <typeparam name="TMeta">Meta information for partition search</typeparam>
 public class StoreOptions<TKey, TInput, TValue, TMeta>
 {
+    private const string DEFAULT_FILE_NAME = "defaultGroup";
     /// <summary>
     /// Method for key comparison
     /// </summary>
@@ -57,7 +58,12 @@ namespace ZeroLevel.Services.PartitionStorage
 internal string GetFileName(TKey key, TMeta info)
 {
-    return FilePartition.FileNameExtractor(key, info);
+    var name = FilePartition.FileNameExtractor(key, info);
+    if (string.IsNullOrWhiteSpace(name))
+    {
+        name = DEFAULT_FILE_NAME;
+    }
+    return name;
 }
 internal string GetCatalogPath(TMeta info)
 {
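With this guard, a FileNameExtractor that yields null, empty, or whitespace no longer produces a broken partition path; such keys now land in a shared "defaultGroup" file. A hypothetical extractor illustrating the fallback:

    // Hypothetical extractor: empty names are now remapped by GetFileName.
    Func<ulong, string> lastThreeDigits = key => key == 0 ? "" : (key % 1000).ToString("D3");
    // GetFileName(0, info)    -> "defaultGroup"  (empty name remapped)
    // GetFileName(1234, info) -> "234"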

@@ -20,11 +20,14 @@ namespace ZeroLevel.Services.PartitionStorage.Partition
 protected readonly TMeta _info;
 protected readonly string _catalog;
+private SemaphoreSlim _writersLock = new SemaphoreSlim(1);
+private readonly Dictionary<string, MemoryStreamWriter> _writeStreams = new Dictionary<string, MemoryStreamWriter>();
 protected IStoreSerializer<TKey, TInput, TValue> Serializer { get; }
 protected readonly StoreOptions<TKey, TInput, TValue, TMeta> _options;
 private readonly IndexBuilder<TKey, TValue> _indexBuilder;
-private readonly Dictionary<string, MemoryStreamWriter> _writeStreams = new Dictionary<string, MemoryStreamWriter>();
 private readonly PhisicalFileAccessorCachee _phisicalFileAccessor;
 protected PhisicalFileAccessorCachee PhisicalFileAccessorCachee => _phisicalFileAccessor;
@@ -41,8 +44,8 @@ namespace ZeroLevel.Services.PartitionStorage.Partition
     Directory.CreateDirectory(_catalog);
 }
 _phisicalFileAccessor = fileAccessorCachee;
-_indexBuilder = _options.Index.Enabled ? new IndexBuilder<TKey, TValue>(_options.Index.StepType, _options.Index.StepValue, _catalog, fileAccessorCachee, Serializer) : null;
 Serializer = serializer;
+_indexBuilder = _options.Index.Enabled ? new IndexBuilder<TKey, TValue>(_options.Index.StepType, _options.Index.StepValue, _catalog, fileAccessorCachee, Serializer) : null;
 }
 #region IStorePartitionBase
@@ -99,11 +102,77 @@ namespace ZeroLevel.Services.PartitionStorage.Partition
 {
     s.Value.Stream.Flush();
     s.Value.Dispose();
+    s.Value.DisposeAsync();
 }
 catch { }
 }
 _writeStreams.Clear();
 }
+protected async Task WriteStreamAction(string fileName, Func<MemoryStreamWriter, Task> writeAction)
+{
+    MemoryStreamWriter writer;
+    if (_writeStreams.TryGetValue(fileName, out writer) == false)
+    {
+        await _writersLock.WaitAsync();
+        try
+        {
+            if (_writeStreams.TryGetValue(fileName, out writer) == false)
+            {
+                var filePath = Path.Combine(_catalog, fileName);
+                var stream = new FileStream(filePath, FileMode.Append, FileAccess.Write, FileShare.None, 4096 * 1024);
+                var new_w = new MemoryStreamWriter(stream);
+                _writeStreams[fileName] = new_w;
+                writer = new_w;
+            }
+        }
+        finally
+        {
+            _writersLock.Release();
+        }
+    }
+    await writeAction.Invoke(writer);
+}
+protected async Task SafeWriteStreamAction(string fileName, Func<MemoryStreamWriter, Task> writeAction)
+{
+    MemoryStreamWriter writer;
+    if (_writeStreams.TryGetValue(fileName, out writer) == false)
+    {
+        await _writersLock.WaitAsync();
+        try
+        {
+            if (_writeStreams.TryGetValue(fileName, out writer) == false)
+            {
+                var filePath = Path.Combine(_catalog, fileName);
+                var stream = new FileStream(filePath, FileMode.Append, FileAccess.Write, FileShare.None, 4096 * 1024);
+                var new_w = new MemoryStreamWriter(stream);
+                _writeStreams[fileName] = new_w;
+                writer = new_w;
+            }
+        }
+        finally
+        {
+            _writersLock.Release();
+        }
+    }
+    await writeAction.Invoke(writer);
+    /*
+    await writer.WaitLockAsync();
+    try
+    {
+        await writeAction.Invoke(writer);
+    }
+    finally
+    {
+        writer.Release();
+    }*/
+}
+/*
 /// <summary>
 /// Attempting to open a file for writing
 /// </summary>
@@ -145,6 +214,11 @@ namespace ZeroLevel.Services.PartitionStorage.Partition
     writer = null;
     return false;
 }
+*/
 /// <summary>
 /// Attempting to open a file for reading
 /// </summary>
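WriteStreamAction applies double-checked creation with a SemaphoreSlim rather than lock, because the body awaits: probe the dictionary, await the semaphore, probe again, and only then create the FileStream. Two caveats are visible in the committed code: SafeWriteStreamAction is currently byte-for-byte identical to WriteStreamAction, since the per-writer WaitLockAsync/Release section is commented out; and the lock-free first TryGetValue races with inserts performed under the semaphore, because Dictionary&lt;,&gt; is not safe for concurrent read/write. A condensed sketch of the intended pattern with a ConcurrentDictionary fast path, using hypothetical names:

    // Hedged sketch: async double-checked creation (lock(...) cannot contain await).
    private readonly ConcurrentDictionary<string, MemoryStreamWriter> _cache = new();
    private readonly SemaphoreSlim _gate = new(1);

    private async Task<MemoryStreamWriter> GetOrCreateAsync(string file)
    {
        if (_cache.TryGetValue(file, out var w)) return w; // fast path, safe for concurrent reads
        await _gate.WaitAsync();
        try
        {
            return _cache.GetOrAdd(file,
                f => new MemoryStreamWriter(new FileStream(
                    Path.Combine(_catalog, f), FileMode.Append,
                    FileAccess.Write, FileShare.None, 4096 * 1024)));
        }
        finally { _gate.Release(); }
    }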

@@ -88,35 +88,24 @@ namespace ZeroLevel.Services.PartitionStorage.Partition
 #endregion
 #region Private methods
 private async Task StoreDirect(TKey key, TInput value)
 {
     var groupKey = _options.GetFileName(key, _info);
-    if (TryGetWriteStream(groupKey, out var stream))
+    await WriteStreamAction(groupKey, async stream =>
     {
         await Serializer.KeySerializer.Invoke(stream, key);
         await Serializer.InputSerializer.Invoke(stream, value);
-    }
+    });
 }
 private async Task StoreDirectSafe(TKey key, TInput value)
 {
     var groupKey = _options.GetFileName(key, _info);
-    bool lockTaken = false;
-    if (TryGetWriteStream(groupKey, out var stream))
-    {
-        Monitor.Enter(stream, ref lockTaken);
-        try
+    await SafeWriteStreamAction(groupKey, async stream =>
     {
         await Serializer.KeySerializer.Invoke(stream, key);
         await Serializer.InputSerializer.Invoke(stream, value);
-    }
-    finally
-    {
-        if (lockTaken)
-        {
-            Monitor.Exit(stream);
-        }
-    }
-    }
+    });
 }
 internal async Task CompressFile(string file)
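Replacing the Monitor block also removes a latent async bug: Monitor is thread-affine, and code after an await may resume on a different thread, so the old Monitor.Exit in StoreDirectSafe could throw SynchronizationLockException. SemaphoreSlim tracks no owning thread, which is why it is the usual substitute around awaits. A minimal contrast, shown as a general pattern rather than this repository's exact code; WriteAsync stands in for the serializer calls:

    private readonly object _monitorGate = new object();
    private readonly SemaphoreSlim _asyncGate = new SemaphoreSlim(1, 1);

    // Broken: the thread that resumes after the await may differ from the
    // thread that acquired the monitor.
    // Monitor.Enter(_monitorGate);
    // try { await WriteAsync(); }
    // finally { Monitor.Exit(_monitorGate); } // may throw SynchronizationLockException

    // Safe: SemaphoreSlim has no thread affinity.
    await _asyncGate.WaitAsync();
    try { await WriteAsync(); }
    finally { _asyncGate.Release(); }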

@@ -95,47 +95,39 @@ namespace ZeroLevel.Services.PartitionStorage
 private async Task<bool> StoreDirect(TKey key, TInput value)
 {
     var groupKey = _options.GetFileName(key, _info);
-    if (TryGetWriteStream(groupKey, out var stream))
+    try
     {
+        await WriteStreamAction(groupKey, async stream =>
+        {
         await Serializer.KeySerializer.Invoke(stream, key);
-        Thread.MemoryBarrier();
         await Serializer.InputSerializer.Invoke(stream, value);
+        });
         return true;
     }
-    else
-    {
-        Log.SystemError($"Fault create write stream for key '{groupKey}'");
-    }
+    catch (Exception ex)
+    {
+        Log.SystemError(ex, $"[StoreDirect] Fault use writeStream for key '{groupKey}'");
     return false;
 }
+}
 private async Task<bool> StoreDirectSafe(TKey key, TInput value)
 {
     var groupKey = _options.GetFileName(key, _info);
-    bool lockTaken = false;
-    if (TryGetWriteStream(groupKey, out var stream))
-    {
-        Monitor.Enter(stream, ref lockTaken);
     try
     {
+        await SafeWriteStreamAction(groupKey, async stream =>
+        {
         await Serializer.KeySerializer.Invoke(stream, key);
-        Thread.MemoryBarrier();
         await Serializer.InputSerializer.Invoke(stream, value);
+        });
         return true;
     }
-    finally
-    {
-        if (lockTaken)
-        {
-            Monitor.Exit(stream);
-        }
-        }
-    }
-    else
-    {
-        Log.SystemError($"Fault create write stream for key '{groupKey}'");
-    }
+    catch(Exception ex)
+    {
+        Log.SystemError(ex, $"[StoreDirectSafe] Fault use writeStream for key '{groupKey}'");
     return false;
 }
+}
 internal async Task CompressFile(string file)
 {

@@ -6,6 +6,7 @@ using System.Linq;
 using System.Net;
 using System.Runtime.CompilerServices;
 using System.Text;
+using System.Threading;
 using System.Threading.Tasks;
 using ZeroLevel.Services.Extensions;
@@ -463,6 +464,10 @@ namespace ZeroLevel.Services.Serialization
 public partial class MemoryStreamWriter :
     IAsyncBinaryWriter
 {
+    private SemaphoreSlim _writeLock = new SemaphoreSlim(1);
+    public async Task WaitLockAsync() => await _writeLock.WaitAsync();
+    public void Release() => _writeLock.Release();
     /// <summary>
     /// Write char (2 bytes)
     /// </summary>
@@ -663,10 +668,9 @@ namespace ZeroLevel.Services.Serialization
 }
 }
-public async Task DisposeAsync()
+public void DisposeAsync()
 {
-    await _stream.FlushAsync();
-    await _stream.DisposeAsync();
+    _writeLock.Dispose();
 }
 #region Extension
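Worth flagging: the new DisposeAsync is a plain void method that only disposes the write-lock semaphore; it no longer flushes or disposes the underlying stream, and it no longer matches the IAsyncDisposable shape. For contrast, a sketch of the conventional pattern, were it adopted here (this is not what the commit does):

    // Hedged sketch of the standard IAsyncDisposable form.
    public async ValueTask DisposeAsync()
    {
        await _stream.FlushAsync();    // persist buffered bytes
        await _stream.DisposeAsync();  // release the underlying stream
        _writeLock.Dispose();
    }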
@@ -676,9 +680,19 @@ namespace ZeroLevel.Services.Serialization
 /// <summary>
 /// Increase writing by batches
 /// </summary>
-private async Task OptimizedWriteCollectionByChunksAsync<T>(IEnumerable<T> collection, Action<MemoryStreamWriter, T> saveAction, int chunk_size)
+private async Task OptimizedWriteCollectionByChunksAsync<T>(IEnumerable<T> collection, Action<MemoryStreamWriter, T> saveAction, Func<MemoryStreamWriter, T, Task> asyncSaveAction, int chunk_size)
 {
     if (collection != null)
+    {
+        if (_stream.CanSeek == false)
+        {
+            WriteInt32(collection.Count());
+            foreach (var item in collection)
+            {
+                await asyncSaveAction.Invoke(this, item);
+            }
+        }
+        else
         {
             MockCount();
             int count = 0;
@@ -711,6 +725,7 @@ namespace ZeroLevel.Services.Serialization
         }
         UpdateCount(count);
     }
+    }
     else
     {
         WriteInt32(0);
@@ -759,37 +774,47 @@ namespace ZeroLevel.Services.Serialization
 }
 }
-public async Task WriteCollectionAsync(IEnumerable<IPAddress> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteIP(i), BATCH_MEMORY_SIZE_LIMIT / 5);
+public async Task WriteCollectionAsync(IEnumerable<IPAddress> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteIP(i), (w, i) => w.WriteIPAsync(i), BATCH_MEMORY_SIZE_LIMIT / 5);
-public async Task WriteCollectionAsync(IEnumerable<IPEndPoint> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteIPEndpoint(i), BATCH_MEMORY_SIZE_LIMIT / 9);
+public async Task WriteCollectionAsync(IEnumerable<IPEndPoint> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteIPEndpoint(i), (w, i) => w.WriteIPEndpointAsync(i), BATCH_MEMORY_SIZE_LIMIT / 9);
-public async Task WriteCollectionAsync(IEnumerable<Guid> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteGuid(i), BATCH_MEMORY_SIZE_LIMIT / 16);
+public async Task WriteCollectionAsync(IEnumerable<Guid> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteGuid(i), (w, i) => w.WriteGuidAsync(i), BATCH_MEMORY_SIZE_LIMIT / 16);
-public async Task WriteCollectionAsync(IEnumerable<DateTime> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteDateTime(i), BATCH_MEMORY_SIZE_LIMIT / 9);
+public async Task WriteCollectionAsync(IEnumerable<DateTime> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteDateTime(i), (w, i) => w.WriteDateTimeAsync(i), BATCH_MEMORY_SIZE_LIMIT / 9);
-public async Task WriteCollectionAsync(IEnumerable<DateTime?> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteDateTime(i), BATCH_MEMORY_SIZE_LIMIT / 9);
+public async Task WriteCollectionAsync(IEnumerable<DateTime?> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteDateTime(i), (w, i) => w.WriteDateTimeAsync(i), BATCH_MEMORY_SIZE_LIMIT / 9);
-public async Task WriteCollectionAsync(IEnumerable<UInt64> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteULong(i), BATCH_MEMORY_SIZE_LIMIT / 8);
+public async Task WriteCollectionAsync(IEnumerable<UInt64> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteULong(i), (w, i) => w.WriteULongAsync(i), BATCH_MEMORY_SIZE_LIMIT / 8);
-public async Task WriteCollectionAsync(IEnumerable<UInt32> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteUInt32(i), BATCH_MEMORY_SIZE_LIMIT / 4);
+public async Task WriteCollectionAsync(IEnumerable<UInt32> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteUInt32(i), (w, i) => w.WriteUInt32Async(i), BATCH_MEMORY_SIZE_LIMIT / 4);
-public async Task WriteCollectionAsync(IEnumerable<char> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteChar(i), BATCH_MEMORY_SIZE_LIMIT / 2);
+public async Task WriteCollectionAsync(IEnumerable<char> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteChar(i), (w, i) => w.WriteCharAsync(i), BATCH_MEMORY_SIZE_LIMIT / 2);
-public async Task WriteCollectionAsync(IEnumerable<short> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteShort(i), BATCH_MEMORY_SIZE_LIMIT / 2);
+public async Task WriteCollectionAsync(IEnumerable<short> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteShort(i), (w, i) => w.WriteShortAsync(i), BATCH_MEMORY_SIZE_LIMIT / 2);
-public async Task WriteCollectionAsync(IEnumerable<ushort> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteUShort(i), BATCH_MEMORY_SIZE_LIMIT / 2);
+public async Task WriteCollectionAsync(IEnumerable<ushort> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteUShort(i), (w, i) => w.WriteUShortAsync(i), BATCH_MEMORY_SIZE_LIMIT / 2);
-public async Task WriteCollectionAsync(IEnumerable<Int64> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteLong(i), BATCH_MEMORY_SIZE_LIMIT / 8);
+public async Task WriteCollectionAsync(IEnumerable<Int64> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteLong(i), (w, i) => w.WriteLongAsync(i), BATCH_MEMORY_SIZE_LIMIT / 8);
-public async Task WriteCollectionAsync(IEnumerable<Int32> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteInt32(i), BATCH_MEMORY_SIZE_LIMIT / 4);
+public async Task WriteCollectionAsync(IEnumerable<Int32> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteInt32(i), (w, i) => w.WriteInt32Async(i), BATCH_MEMORY_SIZE_LIMIT / 4);
-public async Task WriteCollectionAsync(IEnumerable<float> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteFloat(i), BATCH_MEMORY_SIZE_LIMIT / 4);
+public async Task WriteCollectionAsync(IEnumerable<float> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteFloat(i), (w, i) => w.WriteFloatAsync(i), BATCH_MEMORY_SIZE_LIMIT / 4);
-public async Task WriteCollectionAsync(IEnumerable<Double> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteDouble(i), BATCH_MEMORY_SIZE_LIMIT / 8);
+public async Task WriteCollectionAsync(IEnumerable<Double> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteDouble(i), (w, i) => w.WriteDoubleAsync(i), BATCH_MEMORY_SIZE_LIMIT / 8);
 public async Task WriteCollectionAsync(IEnumerable<bool> collection)
 {
     if (collection != null)
+    {
+        if (_stream.CanSeek == false)
+        {
+            WriteInt32(collection.Count());
+            foreach (var item in collection)
+            {
+                WriteBoolean(item);
+            }
+        }
+        else
         {
             MockCount();
@@ -825,6 +850,7 @@ namespace ZeroLevel.Services.Serialization
         UpdateCount(count);
     }
+    }
     else
     {
         WriteInt32(0);
@@ -834,9 +860,18 @@ namespace ZeroLevel.Services.Serialization
 public async Task WriteCollectionAsync(IEnumerable<byte> collection)
 {
     if (collection != null)
+    {
+        if (_stream.CanSeek == false)
+        {
+            WriteInt32(collection.Count());
+            foreach (var item in collection)
+            {
+                WriteByte(item);
+            }
+        }
+        else
         {
             MockCount();
             int count = 0;
             if (_stream is MemoryStream)
             {
@@ -869,6 +904,7 @@ namespace ZeroLevel.Services.Serialization
         UpdateCount(count);
     }
+    }
     else
     {
         WriteInt32(0);
@@ -878,6 +914,16 @@ namespace ZeroLevel.Services.Serialization
 public async Task WriteCollectionAsync(IEnumerable<byte[]> collection)
 {
     if (collection != null)
+    {
+        if (_stream.CanSeek == false)
+        {
+            WriteInt32(collection.Count());
+            foreach (var item in collection)
+            {
+                WriteBytes(item);
+            }
+        }
+        else
         {
             MockCount();
@@ -900,15 +946,16 @@ namespace ZeroLevel.Services.Serialization
         }
         UpdateCount(count);
     }
+    }
     else
     {
         WriteInt32(0);
     }
 }
-public async Task WriteCollectionAsync(IEnumerable<decimal> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteDecimal(i), BATCH_MEMORY_SIZE_LIMIT / 16);
+public async Task WriteCollectionAsync(IEnumerable<decimal> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteDecimal(i), (w, i) => w.WriteDecimalAsync(i), BATCH_MEMORY_SIZE_LIMIT / 16);
-public async Task WriteCollectionAsync(IEnumerable<TimeSpan> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteTimeSpan(i), BATCH_MEMORY_SIZE_LIMIT / 16);
+public async Task WriteCollectionAsync(IEnumerable<TimeSpan> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteTimeSpan(i), (w, i) => w.WriteTimeSpanAsync(i), BATCH_MEMORY_SIZE_LIMIT / 16);
 #endregion
 #region Arrays
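The recurring new branch handles non-seekable streams (network streams, pipes). The existing MockCount/UpdateCount trick writes a placeholder count, streams the items, then seeks back to patch in the real count, which is impossible when CanSeek is false, so the writer now pays one up-front Count() enumeration instead; note the double-enumeration cost for pure IEnumerable&lt;T&gt; sources. A condensed sketch of the two length-prefix strategies, with hypothetical writer/Write helper names (Count() needs System.Linq):

    // Hedged sketch of the two length-prefix strategies.
    if (!stream.CanSeek)
    {
        writer.WriteInt32(items.Count());        // enumerate once just to count
        foreach (var item in items) Write(item); // then stream the payload
    }
    else
    {
        long mark = stream.Position;
        writer.WriteInt32(0);                    // placeholder count
        int n = 0;
        foreach (var item in items) { Write(item); n++; }
        long end = stream.Position;
        stream.Position = mark;
        writer.WriteInt32(n);                    // patch the real count
        stream.Position = end;
    }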

@@ -22,7 +22,7 @@ namespace ZeroLevel.Services
 public byte[] Encrypt(byte[] data)
 {
-    using (Aes aes = new AesManaged())
+    using (Aes aes = AesManaged.Create())
     {
         aes.Padding = PaddingMode.PKCS7;
         aes.KeySize = AesKeySizeInBits;
@@ -44,7 +44,7 @@ namespace ZeroLevel.Services
 public byte[] Decrypt(byte[] data)
 {
-    using (Aes aes = new AesManaged())
+    using (Aes aes = AesManaged.Create())
     {
         aes.Padding = PaddingMode.PKCS7;
         aes.KeySize = AesKeySizeInBits;
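The AesManaged constructor is marked obsolete on modern .NET (SYSLIB0021), which is presumably what this change silences. AesManaged defines no Create of its own, so AesManaged.Create() binds to the inherited Aes.Create() factory and returns a platform-selected implementation; spelling the call through the base type makes that explicit:

    // Equivalent, clearer form of the construction above.
    using (Aes aes = Aes.Create())
    {
        aes.Padding = PaddingMode.PKCS7;
        aes.KeySize = 256; // stands in for AesKeySizeInBits in this class
        // derive Key/IV, then aes.CreateEncryptor() / aes.CreateDecryptor()
    }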

@@ -6,16 +6,16 @@
 </Description>
 <Authors>ogoun</Authors>
 <Company>ogoun</Company>
-<AssemblyVersion>3.4.0.5</AssemblyVersion>
+<AssemblyVersion>3.4.0.6</AssemblyVersion>
-<PackageReleaseNotes>KVDB</PackageReleaseNotes>
+<PackageReleaseNotes>KVDB fixes</PackageReleaseNotes>
 <PackageProjectUrl>https://github.com/ogoun/Zero/wiki</PackageProjectUrl>
 <Copyright>Copyright Ogoun 2023</Copyright>
 <PackageLicenseUrl></PackageLicenseUrl>
 <PackageIconUrl></PackageIconUrl>
 <RepositoryUrl>https://github.com/ogoun/Zero</RepositoryUrl>
 <RepositoryType>git</RepositoryType>
-<Version>3.4.0.5</Version>
+<Version>3.4.0.6</Version>
-<FileVersion>3.4.0.5</FileVersion>
+<FileVersion>3.4.0.6</FileVersion>
 <Platforms>AnyCPU;x64;x86</Platforms>
 <PackageIcon>zero.png</PackageIcon>
 <DebugType>full</DebugType>
