Fix concurrent work. Partition storage

pull/4/head
Ogoun 2 years ago
parent f4e014b0e5
commit cebcb9feb2

@ -57,18 +57,27 @@ namespace ZeroLevel.Services.PartitionStorage
RebuildFileIndexWithSteps(file);
}
}
/// <summary>
/// Delete the index for the specified file
/// </summary>
internal void DropFileIndex(string file)
{
var index_file = Path.Combine(_indexCatalog, Path.GetFileName(file));
_phisicalFileAccessorCachee.DropIndexReader(index_file);
if (File.Exists(index_file))
_phisicalFileAccessorCachee.LockFile(index_file);
try
{
if (File.Exists(index_file))
{
File.Delete(index_file);
}
}
finally
{
File.Delete(index_file);
_phisicalFileAccessorCachee.UnlockFile(index_file);
}
}
/// <summary>
/// Rebuild index with specified number of steps for specified file
/// </summary>
@ -93,17 +102,29 @@ namespace ZeroLevel.Services.PartitionStorage
{
var step = (int)Math.Round(dict.Count / (float)_stepValue, MidpointRounding.ToZero);
var index_file = Path.Combine(_indexCatalog, Path.GetFileName(file));
DropFileIndex(index_file);
var d_arr = dict.OrderBy(p => p.Key).ToArray();
using (var writer = new MemoryStreamWriter(new FileStream(index_file, FileMode.Create, FileAccess.Write, FileShare.None)))
_phisicalFileAccessorCachee.LockFile(index_file);
if (File.Exists(index_file))
{
File.Delete(index_file);
}
try
{
for (int i = 0; i < _stepValue; i++)
var d_arr = dict.OrderBy(p => p.Key).ToArray();
using (var writer = new MemoryStreamWriter(new FileStream(index_file, FileMode.Create, FileAccess.Write, FileShare.None)))
{
var pair = d_arr[i * step];
writer.WriteCompatible(pair.Key);
writer.WriteLong(pair.Value);
for (int i = 0; i < _stepValue; i++)
{
var pair = d_arr[i * step];
writer.WriteCompatible(pair.Key);
writer.WriteLong(pair.Value);
}
}
}
finally
{
_phisicalFileAccessorCachee.UnlockFile(index_file);
}
}
}
/// <summary>
@ -118,24 +139,35 @@ namespace ZeroLevel.Services.PartitionStorage
using (var reader = new MemoryStreamReader(new FileStream(Path.Combine(_dataCatalog, file), FileMode.Open, FileAccess.Read, FileShare.None)))
{
var index_file = Path.Combine(_indexCatalog, Path.GetFileName(file));
DropFileIndex(index_file);
using (var writer = new MemoryStreamWriter(new FileStream(index_file, FileMode.Create, FileAccess.Write, FileShare.None)))
_phisicalFileAccessorCachee.LockFile(index_file);
if (File.Exists(index_file))
{
var counter = 1;
while (reader.EOS == false)
File.Delete(index_file);
}
try
{
using (var writer = new MemoryStreamWriter(new FileStream(index_file, FileMode.Create, FileAccess.Write, FileShare.None)))
{
counter--;
var pos = reader.Position;
var k = _keyDeserializer.Invoke(reader);
_valueDeserializer.Invoke(reader);
if (counter == 0)
var counter = 1;
while (reader.EOS == false)
{
writer.WriteCompatible(k);
writer.WriteLong(pos);
counter = _stepValue;
counter--;
var pos = reader.Position;
var k = _keyDeserializer.Invoke(reader);
_valueDeserializer.Invoke(reader);
if (counter == 0)
{
writer.WriteCompatible(k);
writer.WriteLong(pos);
counter = _stepValue;
}
}
}
}
finally
{
_phisicalFileAccessorCachee.UnlockFile(index_file);
}
}
}
}

@ -124,42 +124,50 @@ namespace ZeroLevel.Services.PartitionStorage.Partition
TKey key;
TInput input;
var dict = new Dictionary<TKey, HashSet<TInput>>();
using (var reader = new MemoryStreamReader(new FileStream(file, FileMode.Open, FileAccess.Read, FileShare.None, 4096 * 1024)))
PhisicalFileAccessorCachee.LockFile(file);
try
{
while (reader.EOS == false)
using (var reader = new MemoryStreamReader(new FileStream(file, FileMode.Open, FileAccess.Read, FileShare.None, 4096 * 1024)))
{
if (false == Serializer.KeyDeserializer.Invoke(reader, out key))
while (reader.EOS == false)
{
throw new Exception($"[StorePartitionBuilder.CompressFile] Fault compress data in file '{file}'. Incorrect file structure. Fault read key.");
}
if (false == dict.ContainsKey(key))
{
dict[key] = new HashSet<TInput>();
}
if (reader.EOS)
{
break;
if (false == Serializer.KeyDeserializer.Invoke(reader, out key))
{
throw new Exception($"[StorePartitionBuilder.CompressFile] Fault compress data in file '{file}'. Incorrect file structure. Fault read key.");
}
if (false == dict.ContainsKey(key))
{
dict[key] = new HashSet<TInput>();
}
if (reader.EOS)
{
break;
}
if (false == Serializer.InputDeserializer.Invoke(reader, out input))
{
throw new Exception($"[StorePartitionBuilder.CompressFile] Fault compress data in file '{file}'. Incorrect file structure. Fault input value.");
}
dict[key].Add(input);
}
if (false == Serializer.InputDeserializer.Invoke(reader, out input))
}
var tempFile = FSUtils.GetAppLocalTemporaryFile();
using (var writer = new MemoryStreamWriter(new FileStream(tempFile, FileMode.Create, FileAccess.Write, FileShare.None, 4096 * 1024)))
{
// sort for search acceleration
foreach (var pair in dict.OrderBy(p => p.Key))
{
throw new Exception($"[StorePartitionBuilder.CompressFile] Fault compress data in file '{file}'. Incorrect file structure. Fault input value.");
var v = _options.MergeFunction(pair.Value);
writer.SerializeCompatible(pair.Key);
writer.SerializeCompatible(v);
}
dict[key].Add(input);
}
File.Delete(file);
File.Move(tempFile, file, true);
}
var tempFile = FSUtils.GetAppLocalTemporaryFile();
using (var writer = new MemoryStreamWriter(new FileStream(tempFile, FileMode.Create, FileAccess.Write, FileShare.None, 4096 * 1024)))
finally
{
// sort for search acceleration
foreach (var pair in dict.OrderBy(p => p.Key))
{
var v = _options.MergeFunction(pair.Value);
writer.SerializeCompatible(pair.Key);
writer.SerializeCompatible(v);
}
PhisicalFileAccessorCachee.UnlockFile(file);
}
File.Delete(file);
File.Move(tempFile, file, true);
}
#endregion
}

@ -115,12 +115,20 @@ namespace ZeroLevel.Services.PartitionStorage
// 2. Replace source
var name = Path.GetFileName(file);
var updateFilePath = Path.Combine(folder, name);
if (File.Exists(updateFilePath))
_phisicalFileAccessor.LockFile(updateFilePath);
try
{
if (File.Exists(updateFilePath))
{
File.Delete(updateFilePath);
}
File.Move(file, updateFilePath, true);
}
finally
{
_phisicalFileAccessor.DropDataReader(updateFilePath);
File.Delete(updateFilePath);
_phisicalFileAccessor.UnlockFile(updateFilePath);
}
File.Move(file, updateFilePath, true);
// 3. Rebuild index
(_accessor as BasePartition<TKey, TInput, TValue, TMeta>).RebuildFileIndex(name);

@ -5,6 +5,7 @@ using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using ZeroLevel.Services.FileSystem;
using ZeroLevel.Services.Memory;
using ZeroLevel.Services.PartitionStorage.Interfaces;
using ZeroLevel.Services.PartitionStorage.Partition;
using ZeroLevel.Services.Serialization;
@ -139,49 +140,57 @@ namespace ZeroLevel.Services.PartitionStorage
{
TKey key;
TInput input;
var dict = new Dictionary<TKey, HashSet<TInput>>();
var accessor = PhisicalFileAccessorCachee.GetDataAccessor(file, 0);
if (accessor != null)
PhisicalFileAccessorCachee.LockFile(file);
try
{
using (var reader = new MemoryStreamReader(accessor))
var dict = new Dictionary<TKey, HashSet<TInput>>();
var accessor = new StreamVewAccessor(new FileStream(file, FileMode.Open, FileAccess.Read, FileShare.None, 1024 * 1024 * 32));
if (accessor != null)
{
while (reader.EOS == false)
using (var reader = new MemoryStreamReader(accessor))
{
if (Serializer.KeyDeserializer.Invoke(reader, out key) == false)
while (reader.EOS == false)
{
throw new Exception($"[StorePartitionBuilder.CompressFile] Fault compress data in file '{file}'. Incorrect file structure. Fault read key.");
}
if (false == dict.ContainsKey(key))
{
dict[key] = new HashSet<TInput>();
}
if (reader.EOS)
{
break;
}
if (Serializer.InputDeserializer.Invoke(reader, out input) == false)
{
throw new Exception($"[StorePartitionBuilder.CompressFile] Fault compress data in file '{file}'. Incorrect file structure. Fault read input value.");
if (Serializer.KeyDeserializer.Invoke(reader, out key) == false)
{
throw new Exception($"[StorePartitionBuilder.CompressFile] Fault compress data in file '{file}'. Incorrect file structure. Fault read key.");
}
if (false == dict.ContainsKey(key))
{
dict[key] = new HashSet<TInput>();
}
if (reader.EOS)
{
break;
}
if (Serializer.InputDeserializer.Invoke(reader, out input) == false)
{
throw new Exception($"[StorePartitionBuilder.CompressFile] Fault compress data in file '{file}'. Incorrect file structure. Fault read input value.");
}
dict[key].Add(input);
}
dict[key].Add(input);
}
}
}
var tempFile = FSUtils.GetAppLocalTemporaryFile();
using (var writer = new MemoryStreamWriter(new FileStream(tempFile, FileMode.Create, FileAccess.Write, FileShare.None, 4096 * 1024)))
{
// sort for search acceleration
foreach (var pair in dict.OrderBy(p => p.Key))
var tempFile = FSUtils.GetAppLocalTemporaryFile();
using (var writer = new MemoryStreamWriter(new FileStream(tempFile, FileMode.Create, FileAccess.Write, FileShare.None, 4096 * 1024)))
{
var v = _options.MergeFunction(pair.Value);
writer.SerializeCompatible(pair.Key);
Thread.MemoryBarrier();
writer.SerializeCompatible(v);
// sort for search acceleration
foreach (var pair in dict.OrderBy(p => p.Key))
{
var v = _options.MergeFunction(pair.Value);
writer.SerializeCompatible(pair.Key);
Thread.MemoryBarrier();
writer.SerializeCompatible(v);
}
}
File.Delete(file);
File.Move(tempFile, file, true);
}
finally
{
PhisicalFileAccessorCachee.UnlockFile(file);
}
PhisicalFileAccessorCachee.DropDataReader(file);
File.Delete(file);
File.Move(tempFile, file, true);
}
#endregion
}

@ -1,4 +1,5 @@
using System;
using System.Collections.Generic;
using System.IO;
using ZeroLevel.Services.Cache;
using ZeroLevel.Services.FileSystem;
@ -12,6 +13,8 @@ namespace ZeroLevel.Services.PartitionStorage
private readonly TimerCachee<ParallelFileReader> _indexReadersCachee;
private readonly TimerCachee<ParallelFileReader> _dataReadersCachee;
private readonly HashSet<string> _lockedFiles = new HashSet<string>();
public PhisicalFileAccessorCachee(TimeSpan dataExpirationPeriod, TimeSpan indexExpirationPeriod)
{
_dataReadersCachee = new TimerCachee<ParallelFileReader>(dataExpirationPeriod, s => new ParallelFileReader(s), i => i.Dispose(), 8192);
@ -32,32 +35,40 @@ namespace ZeroLevel.Services.PartitionStorage
}
public IViewAccessor GetDataAccessor(string filePath, long offset)
{
var reader = GetDataReader(filePath);
try
if (false == _lockedFiles.Contains(filePath))
{
var reader = GetDataReader(filePath);
try
{
return reader.GetAccessor(offset);
}
catch (ObjectDisposedException)
{
_dataReadersCachee.Drop(filePath);
reader = _dataReadersCachee.Get(filePath);
}
return reader.GetAccessor(offset);
}
catch (ObjectDisposedException)
{
_dataReadersCachee.Drop(filePath);
reader = _dataReadersCachee.Get(filePath);
}
return reader.GetAccessor(offset);
return null;
}
public IViewAccessor GetDataAccessor(string filePath, long offset, int length)
{
var reader = GetDataReader(filePath);
try
if (false == _lockedFiles.Contains(filePath))
{
var reader = GetDataReader(filePath);
try
{
return reader.GetAccessor(offset, length);
}
catch (ObjectDisposedException)
{
_dataReadersCachee.Drop(filePath);
reader = _dataReadersCachee.Get(filePath);
}
return reader.GetAccessor(offset, length);
}
catch (ObjectDisposedException)
{
_dataReadersCachee.Drop(filePath);
reader = _dataReadersCachee.Get(filePath);
}
return reader.GetAccessor(offset, length);
return null;
}
public void DropAllDataReaders()
{
@ -79,32 +90,40 @@ namespace ZeroLevel.Services.PartitionStorage
}
public IViewAccessor GetIndexAccessor(string filePath, long offset)
{
var reader = GetIndexReader(filePath);
try
if (false == _lockedFiles.Contains(filePath))
{
var reader = GetIndexReader(filePath);
try
{
return reader.GetAccessor(offset);
}
catch (ObjectDisposedException)
{
_indexReadersCachee.Drop(filePath);
reader = _indexReadersCachee.Get(filePath);
}
return reader.GetAccessor(offset);
}
catch (ObjectDisposedException)
{
_indexReadersCachee.Drop(filePath);
reader = _indexReadersCachee.Get(filePath);
}
return reader.GetAccessor(offset);
return null;
}
public IViewAccessor GetIndexAccessor(string filePath, long offset, int length)
{
var reader = GetIndexReader(filePath);
try
if (false == _lockedFiles.Contains(filePath))
{
var reader = GetIndexReader(filePath);
try
{
return reader.GetAccessor(offset, length);
}
catch (ObjectDisposedException)
{
_indexReadersCachee.Drop(filePath);
reader = _indexReadersCachee.Get(filePath);
}
return reader.GetAccessor(offset, length);
}
catch (ObjectDisposedException)
{
_indexReadersCachee.Drop(filePath);
reader = _indexReadersCachee.Get(filePath);
}
return reader.GetAccessor(offset, length);
return null;
}
public void DropAllIndexReaders()
{
@ -112,6 +131,18 @@ namespace ZeroLevel.Services.PartitionStorage
}
#endregion
public void LockFile(string filePath)
{
_lockedFiles.Add(filePath);
DropDataReader(filePath);
DropIndexReader(filePath);
}
public void UnlockFile(string filePath)
{
_lockedFiles.Remove(filePath);
}
public void Dispose()
{
_dataReadersCachee.Dispose();

@ -6,16 +6,16 @@
</Description>
<Authors>ogoun</Authors>
<Company>ogoun</Company>
<AssemblyVersion>3.3.8.9</AssemblyVersion>
<PackageReleaseNotes>Partition storage. Suppress exception when find invoke</PackageReleaseNotes>
<AssemblyVersion>3.3.9.0</AssemblyVersion>
<PackageReleaseNotes>Partition storage. Fix concurrent work</PackageReleaseNotes>
<PackageProjectUrl>https://github.com/ogoun/Zero/wiki</PackageProjectUrl>
<Copyright>Copyright Ogoun 2023</Copyright>
<PackageLicenseUrl></PackageLicenseUrl>
<PackageIconUrl></PackageIconUrl>
<RepositoryUrl>https://github.com/ogoun/Zero</RepositoryUrl>
<RepositoryType>git</RepositoryType>
<Version>3.3.8.9</Version>
<FileVersion>3.3.8.9</FileVersion>
<Version>3.3.9.0</Version>
<FileVersion>3.3.9.0</FileVersion>
<Platforms>AnyCPU;x64;x86</Platforms>
<PackageIcon>zero.png</PackageIcon>
<DebugType>full</DebugType>

Loading…
Cancel
Save

Powered by TurnKey Linux.