commit e775f1387c (pull/4/head)
parent 7d1f8f228b
author Ogoun (1 year ago)

@ -141,10 +141,10 @@ namespace PartitionFileStorageTest
var val = pairs[i].Item2;
if (testData.ContainsKey(key) == false) testData[key] = new HashSet<ulong>();
testData[key].Add(val);
merger.Store(key, val);
await merger.Store(key, val);
}
Log.Info($"Merge journal filled: {sw.ElapsedMilliseconds}ms. New records merged: {merger.TotalRecords}");
merger.Compress(); // auto reindex
await merger.Compress(); // auto reindex
sw.Stop();
Log.Info($"Compress after merge: {sw.ElapsedMilliseconds}ms");
@ -319,7 +319,7 @@ namespace PartitionFileStorageTest
}
Log.Info($"Merge journal filled: {sw.ElapsedMilliseconds}ms. Total data count: {PAIRS_COUNT}. Unique keys: {Keys.Count}");
merger.Compress(); // auto reindex
await merger.Compress(); // auto reindex
}
sw.Stop();
Log.Info($"Compress after merge: {sw.ElapsedMilliseconds}ms");

@ -1,7 +1,5 @@
using System;
using System.Collections.Concurrent;
using System.Collections.Concurrent;
using System.Net;
using System.Threading;
using ZeroLevel;
using ZeroLevel.Network;
using ZeroLevel.Services.Applications;

@ -163,7 +163,7 @@ namespace ZeroLevel.Collections
/// </summary>
/// <param name="comparer">The <see cref="IEqualityComparer{T}"/>
/// implementation to use when comparing items.</param>
public ConcurrentHashSet(IEqualityComparer<T>? comparer)
public ConcurrentHashSet(IEqualityComparer<T> comparer)
: this(DefaultConcurrencyLevel, DefaultCapacity, true, comparer)
{
}
@ -184,7 +184,7 @@ namespace ZeroLevel.Collections
/// <exception cref="ArgumentNullException"><paramref name="collection"/> is a null reference
/// (Nothing in Visual Basic).
/// </exception>
public ConcurrentHashSet(IEnumerable<T> collection, IEqualityComparer<T>? comparer)
public ConcurrentHashSet(IEnumerable<T> collection, IEqualityComparer<T> comparer)
: this(comparer)
{
if (collection == null) throw new ArgumentNullException(nameof(collection));
@ -211,7 +211,7 @@ namespace ZeroLevel.Collections
/// <exception cref="ArgumentOutOfRangeException">
/// <paramref name="concurrencyLevel"/> is less than 1.
/// </exception>
public ConcurrentHashSet(int concurrencyLevel, IEnumerable<T> collection, IEqualityComparer<T>? comparer)
public ConcurrentHashSet(int concurrencyLevel, IEnumerable<T> collection, IEqualityComparer<T> comparer)
: this(concurrencyLevel, DefaultCapacity, false, comparer)
{
if (collection == null) throw new ArgumentNullException(nameof(collection));
@ -235,12 +235,12 @@ namespace ZeroLevel.Collections
/// <paramref name="concurrencyLevel"/> is less than 1. -or-
/// <paramref name="capacity"/> is less than 0.
/// </exception>
public ConcurrentHashSet(int concurrencyLevel, int capacity, IEqualityComparer<T>? comparer)
public ConcurrentHashSet(int concurrencyLevel, int capacity, IEqualityComparer<T> comparer)
: this(concurrencyLevel, capacity, false, comparer)
{
}
private ConcurrentHashSet(int concurrencyLevel, int capacity, bool growLockArray, IEqualityComparer<T>? comparer)
private ConcurrentHashSet(int concurrencyLevel, int capacity, bool growLockArray, IEqualityComparer<T> comparer)
{
if (concurrencyLevel < 1) throw new ArgumentOutOfRangeException(nameof(concurrencyLevel));
if (capacity < 0) throw new ArgumentOutOfRangeException(nameof(capacity));
@ -375,7 +375,7 @@ namespace ZeroLevel.Collections
continue;
}
Node? previous = null;
Node previous = null;
for (var current = tables.Buckets[bucketNo]; current != null; current = current.Next)
{
Debug.Assert((previous == null && current == tables.Buckets[bucketNo]) || previous!.Next == current);
@ -439,8 +439,8 @@ namespace ZeroLevel.Collections
private readonly ConcurrentHashSet<T> _set;
private Node?[]? _buckets;
private Node? _node;
private Node[] _buckets;
private Node _node;
private int _i;
private int _state;
@ -468,7 +468,7 @@ namespace ZeroLevel.Collections
/// <value>The element in the collection at the current position of the enumerator.</value>
public T Current { get; private set; }
object? IEnumerator.Current => Current;
object IEnumerator.Current => Current;
/// <summary>
/// Sets the enumerator to its initial position, which is before the first element in the collection.
@ -501,7 +501,7 @@ namespace ZeroLevel.Collections
goto case StateOuterloop;
case StateOuterloop:
Node?[]? buckets = _buckets;
Node[] buckets = _buckets;
Debug.Assert(buckets != null);
int i = ++_i;
@ -516,7 +516,7 @@ namespace ZeroLevel.Collections
goto default;
case StateInnerLoop:
Node? node = _node;
Node node = _node;
if (node != null)
{
Current = node.Item;
@ -606,7 +606,7 @@ namespace ZeroLevel.Collections
}
// Try to find this item in the bucket
Node? previous = null;
Node previous = null;
for (var current = tables.Buckets[bucketNo]; current != null; current = current.Next)
{
Debug.Assert(previous == null && current == tables.Buckets[bucketNo] || previous!.Next == current);
@ -878,12 +878,12 @@ namespace ZeroLevel.Collections
private class Tables
{
public readonly Node?[] Buckets;
public readonly Node[] Buckets;
public readonly object[] Locks;
public readonly int[] CountPerLock;
public Tables(Node?[] buckets, object[] locks, int[] countPerLock)
public Tables(Node[] buckets, object[] locks, int[] countPerLock)
{
Buckets = buckets;
Locks = locks;
@ -896,9 +896,9 @@ namespace ZeroLevel.Collections
public readonly T Item;
public readonly int Hashcode;
public volatile Node? Next;
public volatile Node Next;
public Node(T item, int hashcode, Node? next)
public Node(T item, int hashcode, Node next)
{
Item = item;
Hashcode = hashcode;
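A minimal usage sketch for the comparer-taking constructors above; the Add/Contains/TryRemove member names are an assumption based on the usual surface of this kind of concurrent set, not taken from the diff:

    var set = new ConcurrentHashSet<string>(StringComparer.OrdinalIgnoreCase);
    set.Add("alpha");
    bool found = set.Contains("ALPHA");   // true with the case-insensitive comparer
    set.TryRemove("alpha");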

@ -12,7 +12,7 @@ namespace ZeroLevel.Services.Config.Implementation
while (enumerator.MoveNext())
{
string key = (string)enumerator.Entry.Key;
string value = ((string?)enumerator.Entry.Value) ?? string.Empty;
string value = ((string)enumerator.Entry.Value) ?? string.Empty;
result.Append(key, value);
}
return result;

@ -16,10 +16,10 @@ namespace MemoryPools
private protected readonly ObjectWrapper[] _items;
private protected readonly IPooledObjectPolicy<T> _policy;
private protected readonly bool _isDefaultPolicy;
private protected T? _firstItem;
private protected T _firstItem;
// This class was introduced in 2.1 to avoid the interface call where possible
private protected readonly PooledObjectPolicy<T>? _fastPolicy;
private protected readonly PooledObjectPolicy<T> _fastPolicy;
/// <summary>
/// Creates an instance of <see cref="DefaultObjectPool{T}"/>.
@ -95,7 +95,7 @@ namespace MemoryPools
private protected struct ObjectWrapper
{
public T? Element;
public T Element;
}
}
}

@ -81,9 +81,9 @@ namespace MemoryPools
}
}
private static void DisposeItem(T? item)
private static void DisposeItem(T item)
{
if (item is IDisposable disposable)
if (item != null && item is IDisposable disposable)
{
disposable.Dispose();
}

@ -23,7 +23,7 @@ namespace MemoryPools
public static class ObjectPool
{
/// <inheritdoc />
public static ObjectPool<T> Create<T>(IPooledObjectPolicy<T>? policy = null) where T : class, new()
public static ObjectPool<T> Create<T>(IPooledObjectPolicy<T> policy = null) where T : class, new()
{
var provider = new DefaultObjectPoolProvider();
return provider.Create(policy ?? new DefaultPooledObjectPolicy<T>());
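A usage sketch for the Create overload above. The Get/Return members follow the Microsoft.Extensions.ObjectPool shape this MemoryPools code mirrors; treat those member names as an assumption:

    var pool = ObjectPool.Create<System.Text.StringBuilder>();
    var sb = pool.Get();
    try
    {
        sb.Append("payload");
    }
    finally
    {
        pool.Return(sb);   // hands the instance back to _firstItem or an _items slot for reuse
    }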

@ -41,10 +41,9 @@ namespace ZeroLevel.Services.PartitionStorage
var files = Directory.GetFiles(_dataCatalog);
if (files != null && files.Length > 0)
{
foreach (var file in files)
{
RebuildFileIndex(Path.GetFileName(file));
await RebuildFileIndex(Path.GetFileName(file));
}
}
}
@ -121,7 +120,7 @@ namespace ZeroLevel.Services.PartitionStorage
for (int i = 0; i < _stepValue; i++)
{
var pair = d_arr[i * step];
writer.WriteCompatible(pair.Key);
await Serializer.KeySerializer.Invoke(writer, pair.Key);
writer.WriteLong(pair.Value);
}
}
@ -162,7 +161,7 @@ namespace ZeroLevel.Services.PartitionStorage
await Serializer.ValueDeserializer.Invoke(reader);
if (counter == 0)
{
writer.WriteCompatible(k);
await Serializer.KeySerializer.Invoke(writer, k.Value);
writer.WriteLong(pos);
counter = _stepValue;
}

@ -15,6 +15,7 @@ namespace ZeroLevel.Services.PartitionStorage
/// <typeparam name="TMeta">Meta information for partition search</typeparam>
public class StoreOptions<TKey, TInput, TValue, TMeta>
{
private const string DEFAULT_FILE_NAME = "defaultGroup";
/// <summary>
/// Method for key comparison
/// </summary>
@ -57,7 +58,12 @@ namespace ZeroLevel.Services.PartitionStorage
internal string GetFileName(TKey key, TMeta info)
{
return FilePartition.FileNameExtractor(key, info);
var name = FilePartition.FileNameExtractor(key, info);
if (string.IsNullOrWhiteSpace(name))
{
name = DEFAULT_FILE_NAME;
}
return name;
}
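The fallback to DEFAULT_FILE_NAME guards against extractors that return null or whitespace for some keys. For example, a hypothetical extractor that partitions by the last three characters of a string key would yield an empty name for short keys, and such records now land in the "defaultGroup" file instead of producing an invalid path:

    // illustrative extractor lambda, not from the repository
    (key, meta) => key != null && key.Length >= 3 ? key.Substring(key.Length - 3) : string.Empty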
internal string GetCatalogPath(TMeta info)
{

@ -20,11 +20,14 @@ namespace ZeroLevel.Services.PartitionStorage.Partition
protected readonly TMeta _info;
protected readonly string _catalog;
private SemaphoreSlim _writersLock = new SemaphoreSlim(1);
private readonly Dictionary<string, MemoryStreamWriter> _writeStreams = new Dictionary<string, MemoryStreamWriter>();
protected IStoreSerializer<TKey, TInput, TValue> Serializer { get; }
protected readonly StoreOptions<TKey, TInput, TValue, TMeta> _options;
private readonly IndexBuilder<TKey, TValue> _indexBuilder;
private readonly Dictionary<string, MemoryStreamWriter> _writeStreams = new Dictionary<string, MemoryStreamWriter>();
private readonly PhisicalFileAccessorCachee _phisicalFileAccessor;
protected PhisicalFileAccessorCachee PhisicalFileAccessorCachee => _phisicalFileAccessor;
@ -41,8 +44,8 @@ namespace ZeroLevel.Services.PartitionStorage.Partition
Directory.CreateDirectory(_catalog);
}
_phisicalFileAccessor = fileAccessorCachee;
_indexBuilder = _options.Index.Enabled ? new IndexBuilder<TKey, TValue>(_options.Index.StepType, _options.Index.StepValue, _catalog, fileAccessorCachee, Serializer) : null;
Serializer = serializer;
_indexBuilder = _options.Index.Enabled ? new IndexBuilder<TKey, TValue>(_options.Index.StepType, _options.Index.StepValue, _catalog, fileAccessorCachee, Serializer) : null;
}
#region IStorePartitionBase
@ -99,11 +102,77 @@ namespace ZeroLevel.Services.PartitionStorage.Partition
{
s.Value.Stream.Flush();
s.Value.Dispose();
s.Value.DisposeAsync();
}
catch { }
}
_writeStreams.Clear();
}
protected async Task WriteStreamAction(string fileName, Func<MemoryStreamWriter, Task> writeAction)
{
MemoryStreamWriter writer;
if (_writeStreams.TryGetValue(fileName, out writer) == false)
{
await _writersLock.WaitAsync();
try
{
if (_writeStreams.TryGetValue(fileName, out writer) == false)
{
var filePath = Path.Combine(_catalog, fileName);
var stream = new FileStream(filePath, FileMode.Append, FileAccess.Write, FileShare.None, 4096 * 1024);
var new_w = new MemoryStreamWriter(stream);
_writeStreams[fileName] = new_w;
writer = new_w;
}
}
finally
{
_writersLock.Release();
}
}
await writeAction.Invoke(writer);
}
protected async Task SafeWriteStreamAction(string fileName, Func<MemoryStreamWriter, Task> writeAction)
{
MemoryStreamWriter writer;
if (_writeStreams.TryGetValue(fileName, out writer) == false)
{
await _writersLock.WaitAsync();
try
{
if (_writeStreams.TryGetValue(fileName, out writer) == false)
{
var filePath = Path.Combine(_catalog, fileName);
var stream = new FileStream(filePath, FileMode.Append, FileAccess.Write, FileShare.None, 4096 * 1024);
var new_w = new MemoryStreamWriter(stream);
_writeStreams[fileName] = new_w;
writer = new_w;
}
}
finally
{
_writersLock.Release();
}
}
await writeAction.Invoke(writer);
/*
await writer.WaitLockAsync();
try
{
await writeAction.Invoke(writer);
}
finally
{
writer.Release();
}*/
}
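Both methods above use double-checked creation guarded by a SemaphoreSlim: a lock-free lookup first, then the semaphore and a second lookup before creating the stream. The same pattern in a standalone sketch, with a ConcurrentDictionary so the unguarded fast path stays safe (types and names here are illustrative, not the library's; assumes System.Collections.Concurrent, System.IO, System.Threading and System.Threading.Tasks):

    private static readonly SemaphoreSlim _gate = new SemaphoreSlim(1);
    private static readonly ConcurrentDictionary<string, StreamWriter> _cache = new ConcurrentDictionary<string, StreamWriter>();

    private static async Task<StreamWriter> GetOrCreateAsync(string name)
    {
        if (_cache.TryGetValue(name, out var writer)) return writer;   // fast path, no semaphore
        await _gate.WaitAsync();                                       // only creation is serialized
        try
        {
            if (_cache.TryGetValue(name, out writer) == false)         // re-check under the lock
            {
                writer = new StreamWriter(name);
                _cache[name] = writer;
            }
            return writer;
        }
        finally
        {
            _gate.Release();
        }
    }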
/*
/// <summary>
/// Attempting to open a file for writing
/// </summary>
@ -145,6 +214,11 @@ namespace ZeroLevel.Services.PartitionStorage.Partition
writer = null;
return false;
}
*/
/// <summary>
/// Attempting to open a file for reading
/// </summary>

@ -88,35 +88,24 @@ namespace ZeroLevel.Services.PartitionStorage.Partition
#endregion
#region Private methods
private async Task StoreDirect(TKey key, TInput value)
{
var groupKey = _options.GetFileName(key, _info);
if (TryGetWriteStream(groupKey, out var stream))
await WriteStreamAction(groupKey, async stream =>
{
await Serializer.KeySerializer.Invoke(stream, key);
await Serializer.InputSerializer.Invoke(stream, value);
}
});
}
private async Task StoreDirectSafe(TKey key, TInput value)
{
var groupKey = _options.GetFileName(key, _info);
bool lockTaken = false;
if (TryGetWriteStream(groupKey, out var stream))
{
Monitor.Enter(stream, ref lockTaken);
try
await SafeWriteStreamAction(groupKey, async stream =>
{
await Serializer.KeySerializer.Invoke(stream, key);
await Serializer.InputSerializer.Invoke(stream, value);
}
finally
{
if (lockTaken)
{
Monitor.Exit(stream);
}
}
}
});
}
internal async Task CompressFile(string file)

@ -95,47 +95,39 @@ namespace ZeroLevel.Services.PartitionStorage
private async Task<bool> StoreDirect(TKey key, TInput value)
{
var groupKey = _options.GetFileName(key, _info);
if (TryGetWriteStream(groupKey, out var stream))
try
{
await WriteStreamAction(groupKey, async stream =>
{
await Serializer.KeySerializer.Invoke(stream, key);
Thread.MemoryBarrier();
await Serializer.InputSerializer.Invoke(stream, value);
});
return true;
}
else
catch (Exception ex)
{
Log.SystemError($"Fault create write stream for key '{groupKey}'");
}
Log.SystemError(ex, $"[StoreDirect] Fault use writeStream for key '{groupKey}'");
return false;
}
}
private async Task<bool> StoreDirectSafe(TKey key, TInput value)
{
var groupKey = _options.GetFileName(key, _info);
bool lockTaken = false;
if (TryGetWriteStream(groupKey, out var stream))
{
Monitor.Enter(stream, ref lockTaken);
try
{
await SafeWriteStreamAction(groupKey, async stream =>
{
await Serializer.KeySerializer.Invoke(stream, key);
Thread.MemoryBarrier();
await Serializer.InputSerializer.Invoke(stream, value);
});
return true;
}
finally
{
if (lockTaken)
catch(Exception ex)
{
Monitor.Exit(stream);
}
}
}
else
{
Log.SystemError($"Fault create write stream for key '{groupKey}'");
}
Log.SystemError(ex, $"[StoreDirectSafe] Fault use writeStream for key '{groupKey}'");
return false;
}
}
internal async Task CompressFile(string file)
{

@ -6,6 +6,7 @@ using System.Linq;
using System.Net;
using System.Runtime.CompilerServices;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using ZeroLevel.Services.Extensions;
@ -463,6 +464,10 @@ namespace ZeroLevel.Services.Serialization
public partial class MemoryStreamWriter :
IAsyncBinaryWriter
{
private SemaphoreSlim _writeLock = new SemaphoreSlim(1);
public async Task WaitLockAsync() => await _writeLock.WaitAsync();
public void Release() => _writeLock.Release();
/// <summary>
/// Write char (2 bytes)
/// </summary>
@ -663,10 +668,9 @@ namespace ZeroLevel.Services.Serialization
}
}
public async Task DisposeAsync()
public void DisposeAsync()
{
await _stream.FlushAsync();
await _stream.DisposeAsync();
_writeLock.Dispose();
}
#region Extension
@ -676,9 +680,19 @@ namespace ZeroLevel.Services.Serialization
/// <summary>
/// Increase writing by batches
/// </summary>
private async Task OptimizedWriteCollectionByChunksAsync<T>(IEnumerable<T> collection, Action<MemoryStreamWriter, T> saveAction, int chunk_size)
private async Task OptimizedWriteCollectionByChunksAsync<T>(IEnumerable<T> collection, Action<MemoryStreamWriter, T> saveAction, Func<MemoryStreamWriter, T, Task> asyncSaveAction, int chunk_size)
{
if (collection != null)
{
if (_stream.CanSeek == false)
{
WriteInt32(collection.Count());
foreach (var item in collection)
{
await asyncSaveAction.Invoke(this, item);
}
}
else
{
MockCount();
int count = 0;
@ -711,6 +725,7 @@ namespace ZeroLevel.Services.Serialization
}
UpdateCount(count);
}
}
else
{
WriteInt32(0);
@ -759,37 +774,47 @@ namespace ZeroLevel.Services.Serialization
}
}
public async Task WriteCollectionAsync(IEnumerable<IPAddress> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteIP(i), BATCH_MEMORY_SIZE_LIMIT / 5);
public async Task WriteCollectionAsync(IEnumerable<IPAddress> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteIP(i), (w, i) => w.WriteIPAsync(i), BATCH_MEMORY_SIZE_LIMIT / 5);
public async Task WriteCollectionAsync(IEnumerable<IPEndPoint> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteIPEndpoint(i), BATCH_MEMORY_SIZE_LIMIT / 9);
public async Task WriteCollectionAsync(IEnumerable<IPEndPoint> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteIPEndpoint(i), (w, i) => w.WriteIPEndpointAsync(i), BATCH_MEMORY_SIZE_LIMIT / 9);
public async Task WriteCollectionAsync(IEnumerable<Guid> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteGuid(i), BATCH_MEMORY_SIZE_LIMIT / 16);
public async Task WriteCollectionAsync(IEnumerable<Guid> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteGuid(i), (w, i) => w.WriteGuidAsync(i), BATCH_MEMORY_SIZE_LIMIT / 16);
public async Task WriteCollectionAsync(IEnumerable<DateTime> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteDateTime(i), BATCH_MEMORY_SIZE_LIMIT / 9);
public async Task WriteCollectionAsync(IEnumerable<DateTime> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteDateTime(i), (w, i) => w.WriteDateTimeAsync(i), BATCH_MEMORY_SIZE_LIMIT / 9);
public async Task WriteCollectionAsync(IEnumerable<DateTime?> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteDateTime(i), BATCH_MEMORY_SIZE_LIMIT / 9);
public async Task WriteCollectionAsync(IEnumerable<DateTime?> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteDateTime(i), (w, i) => w.WriteDateTimeAsync(i), BATCH_MEMORY_SIZE_LIMIT / 9);
public async Task WriteCollectionAsync(IEnumerable<UInt64> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteULong(i), BATCH_MEMORY_SIZE_LIMIT / 8);
public async Task WriteCollectionAsync(IEnumerable<UInt64> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteULong(i), (w, i) => w.WriteULongAsync(i), BATCH_MEMORY_SIZE_LIMIT / 8);
public async Task WriteCollectionAsync(IEnumerable<UInt32> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteUInt32(i), BATCH_MEMORY_SIZE_LIMIT / 4);
public async Task WriteCollectionAsync(IEnumerable<UInt32> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteUInt32(i), (w, i) => w.WriteUInt32Async(i), BATCH_MEMORY_SIZE_LIMIT / 4);
public async Task WriteCollectionAsync(IEnumerable<char> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteChar(i), BATCH_MEMORY_SIZE_LIMIT / 2);
public async Task WriteCollectionAsync(IEnumerable<char> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteChar(i), (w, i) => w.WriteCharAsync(i), BATCH_MEMORY_SIZE_LIMIT / 2);
public async Task WriteCollectionAsync(IEnumerable<short> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteShort(i), BATCH_MEMORY_SIZE_LIMIT / 2);
public async Task WriteCollectionAsync(IEnumerable<short> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteShort(i), (w, i) => w.WriteShortAsync(i), BATCH_MEMORY_SIZE_LIMIT / 2);
public async Task WriteCollectionAsync(IEnumerable<ushort> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteUShort(i), BATCH_MEMORY_SIZE_LIMIT / 2);
public async Task WriteCollectionAsync(IEnumerable<ushort> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteUShort(i), (w, i) => w.WriteUShortAsync(i), BATCH_MEMORY_SIZE_LIMIT / 2);
public async Task WriteCollectionAsync(IEnumerable<Int64> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteLong(i), BATCH_MEMORY_SIZE_LIMIT / 8);
public async Task WriteCollectionAsync(IEnumerable<Int64> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteLong(i), (w, i) => w.WriteLongAsync(i), BATCH_MEMORY_SIZE_LIMIT / 8);
public async Task WriteCollectionAsync(IEnumerable<Int32> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteInt32(i), BATCH_MEMORY_SIZE_LIMIT / 4);
public async Task WriteCollectionAsync(IEnumerable<Int32> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteInt32(i), (w, i) => w.WriteInt32Async(i), BATCH_MEMORY_SIZE_LIMIT / 4);
public async Task WriteCollectionAsync(IEnumerable<float> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteFloat(i), BATCH_MEMORY_SIZE_LIMIT / 4);
public async Task WriteCollectionAsync(IEnumerable<float> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteFloat(i), (w, i) => w.WriteFloatAsync(i), BATCH_MEMORY_SIZE_LIMIT / 4);
public async Task WriteCollectionAsync(IEnumerable<Double> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteDouble(i), BATCH_MEMORY_SIZE_LIMIT / 8);
public async Task WriteCollectionAsync(IEnumerable<Double> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteDouble(i), (w, i) => w.WriteDoubleAsync(i), BATCH_MEMORY_SIZE_LIMIT / 8);
public async Task WriteCollectionAsync(IEnumerable<bool> collection)
{
if (collection != null)
{
if (_stream.CanSeek == false)
{
WriteInt32(collection.Count());
foreach (var item in collection)
{
WriteBoolean(item);
}
}
else
{
MockCount();
@ -825,6 +850,7 @@ namespace ZeroLevel.Services.Serialization
UpdateCount(count);
}
}
else
{
WriteInt32(0);
@ -834,9 +860,18 @@ namespace ZeroLevel.Services.Serialization
public async Task WriteCollectionAsync(IEnumerable<byte> collection)
{
if (collection != null)
{
if (_stream.CanSeek == false)
{
WriteInt32(collection.Count());
foreach (var item in collection)
{
WriteByte(item);
}
}
else
{
MockCount();
int count = 0;
if (_stream is MemoryStream)
{
@ -869,6 +904,7 @@ namespace ZeroLevel.Services.Serialization
UpdateCount(count);
}
}
else
{
WriteInt32(0);
@ -878,6 +914,16 @@ namespace ZeroLevel.Services.Serialization
public async Task WriteCollectionAsync(IEnumerable<byte[]> collection)
{
if (collection != null)
{
if (_stream.CanSeek == false)
{
WriteInt32(collection.Count());
foreach (var item in collection)
{
WriteBytes(item);
}
}
else
{
MockCount();
@ -900,15 +946,16 @@ namespace ZeroLevel.Services.Serialization
}
UpdateCount(count);
}
}
else
{
WriteInt32(0);
}
}
public async Task WriteCollectionAsync(IEnumerable<decimal> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteDecimal(i), BATCH_MEMORY_SIZE_LIMIT / 16);
public async Task WriteCollectionAsync(IEnumerable<decimal> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteDecimal(i), (w, i) => w.WriteDecimalAsync(i), BATCH_MEMORY_SIZE_LIMIT / 16);
public async Task WriteCollectionAsync(IEnumerable<TimeSpan> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteTimeSpan(i), BATCH_MEMORY_SIZE_LIMIT / 16);
public async Task WriteCollectionAsync(IEnumerable<TimeSpan> collection) => await OptimizedWriteCollectionByChunksAsync(collection, (w, i) => w.WriteTimeSpan(i), (w, i) => w.WriteTimeSpanAsync(i), BATCH_MEMORY_SIZE_LIMIT / 16);
#endregion
#region Arrays
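The new branch for non-seekable streams exists because the count prefix can only be patched after the items on streams that support seeking; otherwise the count must be computed and written up front. A reduced sketch of both strategies (method and parameter names are illustrative, not the writer's API):

    static void WriteCounted(Stream stream, IReadOnlyCollection<int> items, BinaryWriter writer)
    {
        if (stream.CanSeek)
        {
            long countPosition = stream.Position;
            writer.Write(0);                        // reserve 4 bytes for the count
            int count = 0;
            foreach (var item in items) { writer.Write(item); count++; }
            long end = stream.Position;
            stream.Position = countPosition;
            writer.Write(count);                    // patch in the real count
            stream.Position = end;
        }
        else
        {
            writer.Write(items.Count);              // forward-only stream: count must come first
            foreach (var item in items) writer.Write(item);
        }
    }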

@ -22,7 +22,7 @@ namespace ZeroLevel.Services
public byte[] Encrypt(byte[] data)
{
using (Aes aes = new AesManaged())
using (Aes aes = AesManaged.Create())
{
aes.Padding = PaddingMode.PKCS7;
aes.KeySize = AesKeySizeInBits;
@ -44,7 +44,7 @@ namespace ZeroLevel.Services
public byte[] Decrypt(byte[] data)
{
using (Aes aes = new AesManaged())
using (Aes aes = AesManaged.Create())
{
aes.Padding = PaddingMode.PKCS7;
aes.KeySize = AesKeySizeInBits;
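Since AesManaged defines no parameterless Create of its own, AesManaged.Create() binds to the inherited Aes.Create() factory, so this change asks for the platform's default AES implementation instead of constructing the managed one directly. The same shape in isolation (key size shown as a literal; the original uses AesKeySizeInBits):

    using (Aes aes = Aes.Create())
    {
        aes.Padding = PaddingMode.PKCS7;
        aes.KeySize = 256;
        // Key/IV setup and CreateEncryptor()/CreateDecryptor() as in the original methods
    }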

@ -6,16 +6,16 @@
</Description>
<Authors>ogoun</Authors>
<Company>ogoun</Company>
<AssemblyVersion>3.4.0.5</AssemblyVersion>
<PackageReleaseNotes>KVDB</PackageReleaseNotes>
<AssemblyVersion>3.4.0.6</AssemblyVersion>
<PackageReleaseNotes>KVDB fixes</PackageReleaseNotes>
<PackageProjectUrl>https://github.com/ogoun/Zero/wiki</PackageProjectUrl>
<Copyright>Copyright Ogoun 2023</Copyright>
<PackageLicenseUrl></PackageLicenseUrl>
<PackageIconUrl></PackageIconUrl>
<RepositoryUrl>https://github.com/ogoun/Zero</RepositoryUrl>
<RepositoryType>git</RepositoryType>
<Version>3.4.0.5</Version>
<FileVersion>3.4.0.5</FileVersion>
<Version>3.4.0.6</Version>
<FileVersion>3.4.0.6</FileVersion>
<Platforms>AnyCPU;x64;x86</Platforms>
<PackageIcon>zero.png</PackageIcon>
<DebugType>full</DebugType>
