// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
using System;
using System.Threading;
using System.Runtime.InteropServices;
using System.Runtime.CompilerServices;
using System.Diagnostics;
namespace FASTER.core
{
    /// <summary>
    /// Epoch protection
    /// </summary>
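    /// <remarks>
    /// Illustrative usage sketch (an assumption for documentation, not part of
    /// the original comments): a thread acquires an entry, publishes protection
    /// while it operates on shared state, and releases the entry when done.
    /// <code>
    /// var epoch = new LightEpoch();
    /// epoch.Acquire();           // reserve this thread's epoch table entry
    /// epoch.ProtectAndDrain();   // publish protection at the current epoch
    /// // ... operate on epoch-protected shared state ...
    /// epoch.BumpCurrentEpoch(() => { /* reclaim state retired before the bump */ });
    /// epoch.Release();           // withdraw protection
    /// epoch.Dispose();
    /// </code>
    /// </remarks>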
    public unsafe class LightEpoch : IDisposable
{
        /// <summary>
        /// Default invalid index entry.
        /// </summary>
private const int kInvalidIndex = 0;
        /// <summary>
        /// Default number of entries in the entries table
        /// </summary>
public const int kTableSize = 128;
        /// <summary>
        /// Default drain list size
        /// </summary>
private const int kDrainListSize = 16;
        /// <summary>
        /// Thread protection status entries.
        /// </summary>
private Entry[] tableRaw;
private GCHandle tableHandle;
private Entry* tableAligned;
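        // Shared static table: tracks which table slots are owned by which
        // threads, so that all LightEpoch instances on a thread reuse one slot.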
private static Entry[] threadIndex;
private static GCHandle threadIndexHandle;
private static Entry* threadIndexAligned;
        /// <summary>
        /// List of (action, epoch) pairs containing actions to be performed
        /// when an epoch becomes safe to reclaim.
        /// </summary>
private int drainCount = 0;
private readonly EpochActionPair[] drainList = new EpochActionPair[kDrainListSize];
        /// <summary>
        /// Index of this thread's entry in the epoch table.
        /// </summary>
[ThreadStatic]
private static int threadEntryIndex;
        /// <summary>
        /// Number of instances using this entry
        /// </summary>
[ThreadStatic]
private static int threadEntryIndexCount;
        /// <summary>
        /// Cached ID of the current thread
        /// </summary>
        [ThreadStatic]
        private static int threadId;
        /// <summary>
        /// Global current epoch value
        /// </summary>
public int CurrentEpoch;
        /// <summary>
        /// Cached value of latest epoch that is safe to reclaim
        /// </summary>
public int SafeToReclaimEpoch;
        /// <summary>
        /// Static constructor to set up shared cache-aligned space
        /// to store the per-entry count of instances using that entry
        /// </summary>
static LightEpoch()
{
// Over-allocate to do cache-line alignment
threadIndex = new Entry[kTableSize + 2];
threadIndexHandle = GCHandle.Alloc(threadIndex, GCHandleType.Pinned);
long p = (long)threadIndexHandle.AddrOfPinnedObject();
// Force the pointer to align to 64-byte boundaries
long p2 = (p + (Constants.kCacheLineBytes - 1)) & ~(Constants.kCacheLineBytes - 1);
threadIndexAligned = (Entry*)p2;
}
        /// <summary>
        /// Instantiate the epoch table
        /// </summary>
public LightEpoch()
{
// Over-allocate to do cache-line alignment
tableRaw = new Entry[kTableSize + 2];
tableHandle = GCHandle.Alloc(tableRaw, GCHandleType.Pinned);
long p = (long)tableHandle.AddrOfPinnedObject();
// Force the pointer to align to 64-byte boundaries
long p2 = (p + (Constants.kCacheLineBytes - 1)) & ~(Constants.kCacheLineBytes - 1);
tableAligned = (Entry*)p2;
CurrentEpoch = 1;
SafeToReclaimEpoch = 0;
for (int i = 0; i < kDrainListSize; i++)
drainList[i].epoch = int.MaxValue;
drainCount = 0;
}
        /// <summary>
        /// Clean up epoch table
        /// </summary>
public void Dispose()
{
tableHandle.Free();
tableAligned = null;
tableRaw = null;
CurrentEpoch = 1;
SafeToReclaimEpoch = 0;
}
        /// <summary>
        /// Check whether the current thread is protected
        /// </summary>
        /// <returns>Result of the check</returns>
public bool IsProtected()
{
return kInvalidIndex != threadEntryIndex;
}
        /// <summary>
        /// Enter the thread into the protected code region
        /// </summary>
        /// <returns>Current epoch</returns>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public int ProtectAndDrain()
{
int entry = threadEntryIndex;
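            // Publish this thread's entry ownership and its snapshot of the global
            // epoch; while the entry holds a nonzero epoch, reclamation cannot
            // advance past it.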
(*(tableAligned + entry)).threadId = threadEntryIndex;
(*(tableAligned + entry)).localCurrentEpoch = CurrentEpoch;
if (drainCount > 0)
{
Drain((*(tableAligned + entry)).localCurrentEpoch);
}
return (*(tableAligned + entry)).localCurrentEpoch;
}
        /// <summary>
        /// Check and invoke trigger actions that are ready
        /// </summary>
        /// <param name="nextEpoch">Next epoch</param>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private void Drain(int nextEpoch)
{
ComputeNewSafeToReclaimEpoch(nextEpoch);
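            // Scan the drain list: claim a ready slot by CAS'ing its epoch to the
            // transient value int.MaxValue - 1, run its action, then mark the slot
            // free again with int.MaxValue.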
for (int i = 0; i < kDrainListSize; i++)
{
var trigger_epoch = drainList[i].epoch;
if (trigger_epoch <= SafeToReclaimEpoch)
{
if (Interlocked.CompareExchange(ref drainList[i].epoch, int.MaxValue - 1, trigger_epoch) == trigger_epoch)
{
var trigger_action = drainList[i].action;
drainList[i].action = null;
drainList[i].epoch = int.MaxValue;
trigger_action();
if (Interlocked.Decrement(ref drainCount) == 0) break;
}
}
}
}
        /// <summary>
        /// Thread acquires its epoch entry
        /// </summary>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public void Acquire()
{
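            // The first Acquire on a thread reserves a table slot; the count lets
            // multiple instances on the same thread share that slot.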
if (threadEntryIndex == kInvalidIndex)
threadEntryIndex = ReserveEntryForThread();
threadEntryIndexCount++;
}
        /// <summary>
        /// Thread releases its epoch entry
        /// </summary>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public void Release()
{
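            // Withdraw this thread's protection, and free the shared slot once no
            // instance on this thread is using it.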
int entry = threadEntryIndex;
(*(tableAligned + entry)).localCurrentEpoch = 0;
(*(tableAligned + entry)).threadId = 0;
threadEntryIndexCount--;
if (threadEntryIndexCount == 0)
{
(threadIndexAligned + threadEntryIndex)->threadId = 0;
threadEntryIndex = kInvalidIndex;
}
}
        /// <summary>
        /// Thread suspends its epoch entry
        /// </summary>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public void Suspend()
{
Release();
}
        /// <summary>
        /// Thread resumes its epoch entry
        /// </summary>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public void Resume()
{
Acquire();
ProtectAndDrain();
}
        /// <summary>
        /// Increment the global current epoch
        /// </summary>
        /// <returns>The new (incremented) current epoch</returns>
public int BumpCurrentEpoch()
{
int nextEpoch = Interlocked.Add(ref CurrentEpoch, 1);
if (drainCount > 0)
Drain(nextEpoch);
return nextEpoch;
}
        /// <summary>
        /// Increment the current epoch and associate a trigger action
        /// with the prior epoch
        /// </summary>
        /// <param name="onDrain">Trigger action</param>
        /// <returns>The new current epoch</returns>
public int BumpCurrentEpoch(Action onDrain)
{
int PriorEpoch = BumpCurrentEpoch() - 1;
int i = 0, j = 0;
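            // Find a drain-list slot for the action: either claim a free slot
            // (epoch == int.MaxValue) or recycle an expired slot after firing its
            // old action; if the list is full, spin with ProtectAndDrain.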
while (true)
{
if (drainList[i].epoch == int.MaxValue)
{
if (Interlocked.CompareExchange(ref drainList[i].epoch, int.MaxValue - 1, int.MaxValue) == int.MaxValue)
{
drainList[i].action = onDrain;
drainList[i].epoch = PriorEpoch;
Interlocked.Increment(ref drainCount);
break;
}
}
else
{
var triggerEpoch = drainList[i].epoch;
if (triggerEpoch <= SafeToReclaimEpoch)
{
if (Interlocked.CompareExchange(ref drainList[i].epoch, int.MaxValue - 1, triggerEpoch) == triggerEpoch)
{
var triggerAction = drainList[i].action;
drainList[i].action = onDrain;
drainList[i].epoch = PriorEpoch;
triggerAction();
break;
}
}
}
if (++i == kDrainListSize)
{
ProtectAndDrain();
i = 0;
if (++j == 500)
{
j = 0;
Debug.WriteLine("Delay finding a free entry in the drain list");
}
}
}
ProtectAndDrain();
return PriorEpoch + 1;
}
        /// <summary>
        /// Look at all threads and return the latest safe epoch
        /// </summary>
        /// <param name="currentEpoch">Current epoch</param>
        /// <returns>Safe epoch</returns>
private int ComputeNewSafeToReclaimEpoch(int currentEpoch)
{
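            // Scan every entry (slot 0 is kInvalidIndex and never assigned) for
            // the smallest epoch still held by an active thread.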
int oldestOngoingCall = currentEpoch;
for (int index = 1; index <= kTableSize; ++index)
{
int entry_epoch = (*(tableAligned + index)).localCurrentEpoch;
if (0 != entry_epoch)
{
if (entry_epoch < oldestOngoingCall)
{
oldestOngoingCall = entry_epoch;
}
}
}
// The latest safe epoch is the one just before
// the earliest unsafe epoch.
SafeToReclaimEpoch = oldestOngoingCall - 1;
return SafeToReclaimEpoch;
}
        /// <summary>
        /// Reserve an entry for a thread. This method relies on the fact that
        /// no thread will ever have ID 0.
        /// </summary>
        /// <param name="startIndex">Start index</param>
        /// <param name="threadId">Thread id</param>
        /// <returns>Reserved entry</returns>
private static int ReserveEntry(int startIndex, int threadId)
{
int current_iteration = 0;
for (; ; )
{
// Reserve an entry in the table.
for (int i = 0; i < kTableSize; ++i)
{
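                    // The probe maps into slots 1..kTableSize; slot 0 stays
                    // reserved as kInvalidIndex.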
int index_to_test = 1 + ((startIndex + i) & (kTableSize - 1));
if (0 == (threadIndexAligned + index_to_test)->threadId)
{
bool success =
(0 == Interlocked.CompareExchange(
ref (threadIndexAligned+index_to_test)->threadId,
threadId, 0));
if (success)
{
return (int)index_to_test;
}
}
++current_iteration;
}
if (current_iteration > (kTableSize * 10))
{
throw new Exception("Unable to reserve an epoch entry, try increasing the epoch table size (kTableSize)");
}
}
}
        /// <summary>
        /// Allocate a new entry in the epoch table. This is called
        /// once per thread.
        /// </summary>
        /// <returns>Reserved entry</returns>
private static int ReserveEntryForThread()
{
if (threadId == 0) // run once per thread for performance
{
// For portability(run on non-windows platform)
threadId = Environment.OSVersion.Platform == PlatformID.Win32NT ? (int)Native32.GetCurrentThreadId() : Thread.CurrentThread.ManagedThreadId;
}
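            // Hash the thread id so threads start probing at different positions,
            // reducing collisions in the shared table.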
int startIndex = Utility.Murmur3(threadId);
return ReserveEntry(startIndex, threadId);
}
        /// <summary>
        /// Epoch table entry (cache-line sized).
        /// </summary>
[StructLayout(LayoutKind.Explicit, Size = Constants.kCacheLineBytes)]
private struct Entry
{
            /// <summary>
            /// Thread-local value of epoch
            /// </summary>
[FieldOffset(0)]
public int localCurrentEpoch;
            /// <summary>
            /// ID of thread associated with this entry.
            /// </summary>
[FieldOffset(4)]
public int threadId;
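            /// <summary>
            /// Reentrancy count for this entry (reserved; not used by this class)
            /// </summary>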
[FieldOffset(8)]
public int reentrant;
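            /// <summary>
            /// Activity markers used by MarkAndCheckIsComplete; 13 ints pad the
            /// entry out to a full 64-byte cache line (12 + 13 * 4 = 64)
            /// </summary>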
[FieldOffset(12)]
public fixed int markers[13];
};
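        /// <summary>
        /// Pair of an epoch and the trigger action to run once that epoch
        /// becomes safe to reclaim
        /// </summary>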
private struct EpochActionPair
{
public long epoch;
public Action action;
}
        /// <summary>
        /// Mechanism for a thread to mark some activity as completed up to
        /// some version, and to check whether all active threads have completed
        /// that same activity up to that version.
        /// </summary>
        /// <param name="markerIdx">ID of the activity</param>
        /// <param name="version">Version</param>
        /// <returns>Whether all active threads have completed the activity up to the version</returns>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public bool MarkAndCheckIsComplete(int markerIdx, int version)
{
int entry = threadEntryIndex;
if (kInvalidIndex == entry)
{
Debug.WriteLine("New Thread entered during CPR");
Debug.Assert(false);
}
(*(tableAligned + entry)).markers[markerIdx] = version;
// check if all threads have reported complete
for (int index = 1; index <= kTableSize; ++index)
{
int entry_epoch = (*(tableAligned + index)).localCurrentEpoch;
int fc_version = (*(tableAligned + index)).markers[markerIdx];
if (0 != entry_epoch)
{
if (fc_version != version && entry_epoch < int.MaxValue)
{
return false;
}
}
}
return true;
}
}
}