I'm using Redis Cache in an Azure website; the cache itself is hosted in Azure. Our monitoring was showing some timeouts when setting values in the cache, so I re-ran the load tests I had used before moving from the local server cache to Redis. The results were pretty bad compared to the previous runs, mostly because of timeouts against the Redis cache.
I'm using the strong-named version of the StackExchange.Redis library, version 1.0.333.
I was careful not to create a new connection each time I access the cache.
The load test doesn't actually load the server up that much. The results were previously 100% successful; now I get about a 50% error rate, all caused by timeouts.
Here is the code being used to access the cache:
public static class RedisCacheProvider
{
private static ConnectionMultiplexer connection;
private static ConnectionMultiplexer Connection
{
get
{
if (connection == null || !connection.IsConnected)
{
connection = ConnectionMultiplexer.Connect(ConfigurationManager.ConnectionStrings["RedisCache"].ToString());
}
return connection;
}
}
private static IDatabase Cache
{
get
{
return Connection.GetDatabase();
}
}
public static T Get<T>(string key)
{
return Deserialize<T>(Cache.StringGet(key));
}
public static object Get(string key)
{
return Deserialize<object>(Cache.StringGet(key));
}
public static void Set(string key, object value)
{
Cache.StringSet(key, Serialize(value));
}
public static void Remove(string key)
{
Cache.KeyDelete(key);
}
public static void RemoveContains(string contains)
{
var endpoints = Connection.GetEndPoints();
var server = Connection.GetServer(endpoints.First());
var keys = server.Keys();
foreach (var key in keys)
{
if (key.ToString().Contains(contains))
Cache.KeyDelete(key);
}
}
public static void RemoveAll()
{
var endpoints = Connection.GetEndPoints();
var server = Connection.GetServer(endpoints.First());
server.FlushAllDatabases();
}
static byte[] Serialize(object o)
{
if (o == null)
{
return null;
}
BinaryFormatter binaryFormatter = new BinaryFormatter();
using (MemoryStream memoryStream = new MemoryStream())
{
binaryFormatter.Serialize(memoryStream, o);
byte[] objectDataAsStream = memoryStream.ToArray();
return objectDataAsStream;
}
}
static T Deserialize<T>(byte[] stream)
{
if (stream == null)
{
return default(T);
}
BinaryFormatter binaryFormatter = new BinaryFormatter();
using (MemoryStream memoryStream = new MemoryStream(stream))
{
T result = (T)binaryFormatter.Deserialize(memoryStream);
return result;
}
}
}
I have had the same issue recently.
A few points that will improve your situation:
Protobuf-net instead of BinaryFormatter
I recommend using protobuf-net, as it will reduce the size of the values you store in your cache.
public interface ICacheDataSerializer
{
byte[] Serialize(object o);
T Deserialize<T>(byte[] stream);
}
public class ProtobufNetSerializer : ICacheDataSerializer
{
public byte[] Serialize(object o)
{
using (var memoryStream = new MemoryStream())
{
Serializer.Serialize(memoryStream, o);
return memoryStream.ToArray();
}
}
public T Deserialize<T>(byte[] stream)
{
using (var memoryStream = new MemoryStream(stream))
{
    return Serializer.Deserialize<T>(memoryStream);
}
}
}
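One caveat the snippet above glosses over: unlike BinaryFormatter, protobuf-net does not serialize arbitrary objects; every type you cache needs a contract. A minimal sketch (the CachedUser type is hypothetical):
[ProtoContract]
public class CachedUser
{
    // Field numbers must be stable; protobuf-net uses them instead of type metadata.
    [ProtoMember(1)] public int Id { get; set; }
    [ProtoMember(2)] public string Name { get; set; }
}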
Implement retry strategy
Implement this RedisCacheTransientErrorDetectionStrategy to handle timeout issues.
using Microsoft.Practices.TransientFaultHandling;
public class RedisCacheTransientErrorDetectionStrategy : ITransientErrorDetectionStrategy
{
/// <summary>
/// Custom Redis transient error detection strategy, implemented to cover the Redis-specific exceptions.
/// </summary>
/// <param name="ex"></param>
/// <returns></returns>
public bool IsTransient(Exception ex)
{
if (ex == null) return false;
if (ex is TimeoutException) return true;
if (ex is RedisServerException) return true;
if (ex is RedisException) return true;
if (ex.InnerException != null)
{
return IsTransient(ex.InnerException);
}
return false;
}
}
Instantiate like this:
private readonly RetryPolicy _retryPolicy;
// CODE
var retryStrategy = new FixedInterval(3, TimeSpan.FromSeconds(2));
_retryPolicy = new RetryPolicy<RedisCacheTransientErrorDetectionStrategy>(retryStrategy);
Use like this:
var cachedString = _retryPolicy.ExecuteAction(() => dataCache.StringGet(fullCacheKey));
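The same pattern applies to writes; for example (a sketch assuming the same _retryPolicy and dataCache instances, with serializedValue being whatever your serializer produced):
_retryPolicy.ExecuteAction(() => dataCache.StringSet(fullCacheKey, serializedValue));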
Review your code to minimize cache calls and the size of the values you store in your cache. I eliminated a lot of errors by storing values more efficiently.
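One way to cut down round trips (my sketch, not part of the original answer): StackExchange.Redis can fetch several keys in a single server call instead of issuing one StringGet per key. The key names here are hypothetical, and Cache is the IDatabase from your provider:
// One round trip for three keys instead of three separate calls.
var keys = new RedisKey[] { "user:1", "user:2", "user:3" };
RedisValue[] values = Cache.StringGet(keys);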
If none of this helps, move to a higher cache tier (we ended up using C3 instead of C1).
You should not create a new ConnectionMultiplexer if IsConnected is false. The existing multiplexer will reconnect in the background. By creating a new multiplexer and not disposing the old one, you are leaking connections. We recommend the following pattern:
private static Lazy<ConnectionMultiplexer> lazyConnection =
new Lazy<ConnectionMultiplexer>(() => {
return ConnectionMultiplexer.Connect(
"mycache.redis.cache.windows.net,abortConnect=false,ssl=true,password=...");
});
public static ConnectionMultiplexer Connection {
get {
return lazyConnection.Value;
}
}
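GetDatabase() on the multiplexer is a cheap pass-through call, so the rest of your provider can stay as it is; a minimal usage sketch (the key and value are just illustrations):
// IDatabase instances are lightweight; fetch one per operation from the shared multiplexer.
IDatabase cache = Connection.GetDatabase();
cache.StringSet("some-key", "some-value");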
You can monitor the number of connections to your cache in the Azure portal. If it seems unusually high, this may be what is impacting your performance.
For further assistance, please contact us at azurecache@microsoft.com.
I have been working on a device that sends data to an Azure IoT Hub.
The device does this in two different places in the code. In one place it works perfectly, and I can connect to the hub via connection string with transport type Mqtt_WebSocket_Only.
public static class Mqtt2IoTNew
{
private static string _DeviceConnectionString = Properties.Settings.Default.MqttUri;
private static TransportType _TransportType = TransportType.Mqtt_WebSocket_Only;
public static void Send(object argEntry, bool argIsList)
{
var deviceClient = DeviceClient.CreateFromConnectionString(_DeviceConnectionString, _TransportType);
deviceClient.ReceiveAsync(TimeSpan.FromSeconds(2)).Wait();
var message = new Message(deviceClient, argEntry, argIsList);
message.RunAsync().GetAwaiter().GetResult();
}
}
internal class Message
{
private DeviceClient _DeviceClient;
private readonly string _Message;
public Message(DeviceClient argDeviceClient, object argEntry, bool isList)
{
_DeviceClient = argDeviceClient;
StringBuilder stb = new StringBuilder();
if (isList)
{
foreach (var entity in (List<object>) argEntry)
{
stb.Append("<entity>").Append(JsonConvert.SerializeObject(entity)).Append("</entity>\n");
}
}
else
{
stb.Append(JsonConvert.SerializeObject(argEntry));
}
_Message = stb.ToString();
}
public async Task RunAsync()
{
await SendEvent().ConfigureAwait(false);
}
private async Task SendEvent()
{
Microsoft.Azure.Devices.Client.Message eventMessage = new Microsoft.Azure.Devices.Client.Message(Encoding.UTF8.GetBytes(_Message));
await _DeviceClient.SendEventAsync(eventMessage).ConfigureAwait(false);
}
}
//Call of method that does not work
protected override void DoOnCompleted(IRepository argRepository)
{
if (_CurrentlySendingTreadId.HasValue)
{
if (_CurrentlySendingTreadId.Value == Thread.CurrentThread.ManagedThreadId)
{
return;
}
}
TaskFactoryProvider.GetFactory().StartNew(()=>SendBatchProtocols());
}
public bool SendBatchProtocols()
{
using (var repository = RepositoryProviderHolder.RepositoryProvider.GetRepository(Constants.CONTAINERCONTRACT_PRODUCTIONREPOSITORY))
{
IQueryable<BatchProtocol> batchProtocolQuery = repository.GetQuery<BatchProtocol>().OrderBy(bp => bp.InternalNoInteger);
batchProtocolQuery = batchProtocolQuery.Where(bp => !bp.IsArchived).Take(1);
if (!batchProtocolQuery.Any()) return false;
var batchProtocols = batchProtocolQuery.ToList();
IsBatchProtocolSend = false;
try
{
foreach (var bps in batchProtocols)
{
Mqtt2IoTNew.Send(bps,false);
}
IsBatchProtocolSend = true;
}
catch (Exception ex)
{
throw;
}
}
return IsBatchProtocolSend;
}
//Call of Method that does work
private void AddEntitiesAndSaveChanges(IEnumerable argEntities)
{
if (argEntities == null)
{
return;
}
lock (_UnderlyingRepositoryAccessLockObject)
{
#region Log2DornerIoT
if (Properties.Settings.Default.Log2DornerIoT)
{
List<object> entities = new List<object>();
int i = 0;
foreach (var entity in argEntities)
{
if (i < 100)
{
entities.Add(entity);
i++;
}
else
{
try
{
Mqtt2IoTNew.Send(entities, true);
}
catch (Exception e)
{
throw;
}
entities.Clear();
i = 0;
}
}
}
}
In the other part of the code, I am calling the same class and the same Send method in the same way, but here I get an exception that says "TLS authentication error", with the inner exceptions "Unable to connect to the remote server" and "The underlying connection was closed: Could not establish trust relationship for the SSL/TLS secure channel".
But I never used any kind of authorization, neither in the first part (which works perfectly) nor in the second.
I would be very happy if someone could help me. I have found nothing so far regarding this issue.
Thanks for your time.
Michael
I found the reason why it didn't work: a permissive certificate policy was applied that blocked the certificate on one side of the project. I disabled it, and now it works perfectly fine.
Thanks for the help anyway.
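For anyone hitting the same error: such a certificate policy usually shows up as a server certificate validation callback. A diagnostic sketch (hypothetical, not the exact code from my project; needs System.Net and System.Net.Security) that logs why validation fails:
// Log the TLS validation result instead of silently rejecting the certificate.
// Do NOT return true unconditionally in production code.
ServicePointManager.ServerCertificateValidationCallback =
    (sender, certificate, chain, sslPolicyErrors) =>
    {
        Console.WriteLine("TLS validation result: " + sslPolicyErrors);
        return sslPolicyErrors == SslPolicyErrors.None;
    };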
I'm trying to track down problems with protobuf serialization in my .NET Core application, so I wrote a test application and... something strange happens: there is a memory leak if I use Task, and there is no memory leak if I use Thread.
Example with Task:
internal class Program
{
private static void Main(string[] args)
{
var data = new Person
{
Id = 1,
Name = new string('q', 10000),
Address = new Address
{
Line1 = new string('w', 10000),
Line2 = new string('e', 10000)
}
};
//for (var i = 0; i < 100; i++)
//{
// var thread = new Thread(() =>
// {
// while (true)
// try
// {
// var b = ProtoSerialize(data);
// Person serializedPerson;
// using (Stream stream2 = new MemoryStream(b))
// {
// serializedPerson = Serializer.Deserialize<Person>(stream2);
// }
// }
// catch (Exception e)
// {
// Console.WriteLine(e);
// }
// });
// thread.Start();
//}
for (var i = 0; i < 100; i++)
{
new Task(() =>
{
while (true)
{
var b = ProtoSerialize(data);
Person serializedPerson;
using (Stream stream2 = new MemoryStream(b))
{
serializedPerson = Serializer.Deserialize<Person>(stream2);
}
}
}).Start();
}
Console.ReadLine();
}
public static byte[] ProtoSerialize<T>(T record) where T : class
{
using (var stream = new MemoryStream())
{
Serializer.Serialize(stream, record);
return stream.ToArray();
}
}
}
[ProtoContract]
public class Person
{
[ProtoMember(1)]
public int Id { get; set; }
[ProtoMember(2)]
public string Name { get; set; }
[ProtoMember(3)]
public Address Address { get; set; }
}
[ProtoContract]
public class Address
{
[ProtoMember(1)]
public string Line1 { get; set; }
[ProtoMember(2)]
public string Line2 { get; set; }
}
A dotMemory screenshot (not reproduced here) shows the memory steadily growing.
So if I use threads instead (the commented-out code above):
for (var i = 0; i < 100; i++)
{
var thread = new Thread(() =>
{
while (true)
try
{
var b = ProtoSerialize(data);
Person serializedPerson;
using (Stream stream2 = new MemoryStream(b))
{
serializedPerson = Serializer.Deserialize<Person>(stream2);
}
}
catch (Exception e)
{
Console.WriteLine(e);
}
});
thread.Start();
}
then there is no leak (again per dotMemory).
So the questions are:
Am I doing something wrong?
Why does this happen?
How do I do it right?
P.S.
protobuf-net is 2.3.2
.NET Core 2.0
UPD1: calling GC.Collect on the main thread changes nothing (there is still a memory leak).
Well that's fun. protobuf-net does make limited use of some background pools - there is a [ThreadStatic] instance of ProtoReader, for example - although that is a light object by itself; while it does keep a byte[] pool handy, that is not kept referenced by the [ThreadStatic] instance (the buffer is released to the pool before then). So there's nothing that immediately leaps to mind. Have you tried doing a few forced GC.Collects to see if anything is actually leaking, vs simply not having been collected yet? Have you tried replacing all the protobuf-net code with dummy code (Thread.Sleep(100);, for example) to see if you're just seeing the weight of the TPL machinery? Do you know (from that tool) which object types are accounting for the overhead?
It is also possible that this is simply a consequence of the very different threading usage here upsetting the GC: the Task code is going to be using the ThreadPool, so a limited number of threads. That is neither good nor bad - just: different.
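To test the "not collected yet" theory, a forced-collection probe along these lines (my sketch, not from the answer above) can be run from the main thread between measurements:
// Force a full blocking collection, then report what actually survives.
GC.Collect();
GC.WaitForPendingFinalizers();
GC.Collect();
Console.WriteLine($"Live managed heap: {GC.GetTotalMemory(forceFullCollection: true):N0} bytes");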
My unit-of-work class is shown below. I am using Ninject, and I have tried injecting IUnitOfWork in request scope, thread scope, transient scope, etc., but I am still getting this error:
"Message":"An error has occurred.","ExceptionMessage":"The context cannot be used while the model is being created. This exception may be thrown if the context is used inside the OnModelCreating method or if the same context instance is accessed by multiple threads concurrently. Note that instance members of DbContext and related classes are not guaranteed to be thread safe.","ExceptionType":"System.InvalidOperationException
I get this error when I make two Web API (GET) calls at the same time from AngularJS, and the error is thrown at _context.Set<TEntity>().FirstOrDefault(match);
public class UnitOfWork : IUnitOfWork, IDisposable
{
private My_PromotoolEntities _uowDbContext = new My_PromotoolEntities();
private Dictionary<string, object> _repositories;
// Do it like this if no specific class file
private GenericRepository<MysPerson> _personRepository;
//private GenericRepository<MysDataSource> dataSourcesRepository;
//private GenericRepository<MysCountry> countryMasterRepository;
// Or like this if with specific class file.
private DataSourceRepository _dataSourcesRepository;
private CustomerRepository _customerRepository;
private DeviceRepository _deviceRepository;
private DeviceRegistrationRepository _deviceRegistrationRepository;
private EmailQueueRepository _emailQueueRepository;
public void SetContext(My_PromotoolEntities context)
{
_uowDbContext = context;
}
public void CacheThis(object cacheThis, string keyName, TimeSpan howLong)
{
Cacheing.StaticData.CacheStaticData(cacheThis, keyName, howLong);
}
public object GetFromCache(string keyName)
{
return Cacheing.StaticData.GetFromCache(keyName);
}
public GenericRepository<T> GenericRepository<T>() where T : BaseEntity
{
if (_repositories == null)
{
_repositories = new Dictionary<string, object>();
}
var type = typeof(T).Name;
if (!_repositories.ContainsKey(type))
{
var repositoryType = typeof(GenericRepository<>);
var repositoryInstance = Activator.CreateInstance(repositoryType.MakeGenericType(typeof(T)), _uowDbContext);
_repositories.Add(type, repositoryInstance);
}
return (GenericRepository<T>)_repositories[type];
}
public GenericRepository<MysPerson> PersonRepository
{
get
{
if (this._personRepository == null)
{
this._personRepository = new GenericRepository<MysPerson>(_uowDbContext);
}
return _personRepository;
}
}
public DataSourceRepository DataSourcesRepository
{
get
{
if (this._dataSourcesRepository == null)
{
this._dataSourcesRepository = new DataSourceRepository(_uowDbContext);
}
return _dataSourcesRepository;
}
}
public CustomerRepository CustomerRepository
{
get
{
if (this._customerRepository == null)
{
this._customerRepository = new CustomerRepository(_uowDbContext);
}
return _customerRepository;
}
}
public DeviceRepository DeviceRepository
{
get
{
if (this._deviceRepository == null)
{
this._deviceRepository = new DeviceRepository(_uowDbContext);
}
return _deviceRepository;
}
}
public DeviceRegistrationRepository DeviceRegistrationRepository
{
get
{
if (this._deviceRegistrationRepository == null)
{
this._deviceRegistrationRepository = new DeviceRegistrationRepository(_uowDbContext);
}
return _deviceRegistrationRepository;
}
}
public EmailQueueRepository emailQueueRepository
{
get
{
if (this._emailQueueRepository == null)
{
this._emailQueueRepository = new EmailQueueRepository(_uowDbContext);
}
return _emailQueueRepository;
}
}
/// <summary>
/// Commits all changes to the db. Throws exception if fails. Call should be in a try..catch.
/// </summary>
public void Save()
{
try
{
_uowDbContext.SaveChanges();
}
catch (DbEntityValidationException dbevex)
{
// Entity Framework specific errors:
StringBuilder sb = new StringBuilder();
var eve = GetValidationErrors();
if (eve != null && eve.Count > 0) // GetValidationErrors() returns null when there are no errors
{
eve.ForEach(error => sb.AppendLine(error));
}
ClearContext();
// Throw a new exception with original as inner.
var ex = new Exception(sb.ToString(), dbevex);
ex.Source = "DbEntityValidationException";
throw ex;
}
catch (Exception)
{
ClearContext();
throw;
}
}
private void ClearContext()
{
DetachAll();
}
private void DetachAll()
{
foreach (DbEntityEntry dbEntityEntry in _uowDbContext.ChangeTracker.Entries())
{
if (dbEntityEntry.Entity != null)
{
dbEntityEntry.State = EntityState.Detached;
}
}
}
/// <summary>
/// Checks for EF DbEntityValidationException(s).
/// </summary>
/// <returns>Returns a List of string containing the EF DbEntityValidationException(s).</returns>
public List<string> GetValidationErrors()
{
if (_uowDbContext.GetValidationErrors().Count() != 0)
{
return _uowDbContext.GetValidationErrors().Select(e => string.Join(Environment.NewLine, e.ValidationErrors.Select(v => string.Format("{0} - {1}", v.PropertyName, v.ErrorMessage)))).ToList();
}
return null;
}
private bool disposed = false;
protected virtual void Dispose(bool disposing)
{
if (!this.disposed)
{
if (disposing)
{
_uowDbContext.Dispose();
}
}
this.disposed = true;
}
public void Dispose()
{
Dispose(true);
GC.SuppressFinalize(this);
}
}
You should never use a context in two places at the same time; that's exactly why you are getting this error. From the MSDN documentation:
Thread Safety: Any public static (Shared in Visual Basic) members of this type are thread safe. Any instance members are not guaranteed to be thread safe.
It is a little hard to make suggestions without a repro, but there is a brute-force approach that should resolve the issue. If you have an interception point before/during DI setup, you can cause all the context initialization to happen up front by creating an instance of your context and calling ctx.Database.Initialize(force: false); passing force: false ensures that the initialization still only happens once per AppDomain.
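A sketch of that brute-force approach, using the My_PromotoolEntities context from the question (where exactly you hook this depends on your app, e.g. Application_Start before any requests are served):
// Build the EF model once, up front, so concurrent first requests
// don't race inside OnModelCreating.
using (var ctx = new My_PromotoolEntities())
{
    ctx.Database.Initialize(force: false);
}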
Currently my app returns data via a MemoryStream. The problem is that the data can be larger than 500 MB, and that takes up a lot of memory before the response is returned.
I am looking for a way to return the data progressively, for example flushing the output every 1 MB.
First I tried IPartialWriter:
public class ViewRenderResult : IPartialWriter
{
public void WritePartialTo(IResponse response)
{
response.Write("XXX");
}
public bool IsPartialRequest { get { return true; } }
}
response.Write can only be called once.
Then I found IStreamWriter:
public interface IStreamWriter
{
void WriteTo(Stream responseStream);
}
I suspect it buffers all the data before returning.
Can anyone clarify this?
This previous answer shows different response types ServiceStack supports, e.g. you can just return a Stream or write to the base.Response.OutputStream directly from within your Service.
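For example, here is a sketch of writing progressively from within a Service; the request DTO and the OpenSourceStream helper are hypothetical, and flushing per chunk is what keeps memory usage flat:
public object Any(BigDataRequest request) // hypothetical DTO
{
    base.Response.ContentType = "application/octet-stream";
    var buffer = new byte[1024 * 1024]; // 1 MB chunks
    using (var source = OpenSourceStream(request)) // however you obtain the data
    {
        int read;
        while ((read = source.Read(buffer, 0, buffer.Length)) > 0)
        {
            base.Response.OutputStream.Write(buffer, 0, read);
            base.Response.Flush(); // push each chunk to the client
        }
    }
    base.Response.EndRequest(); // ServiceStack extension that completes the response
    return null;
}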
These ImageServices also show the different ways you can write a binary response like an Image to the response stream, e.g. here's an example of using a custom IStreamWriter which lets you control how to write to the Response OutputStream:
//Your own Custom Result, writes directly to response stream
public class ImageResult : IDisposable, IStreamWriter, IHasOptions
{
private readonly Image image;
private readonly ImageFormat imgFormat;
public ImageResult(Image image, ImageFormat imgFormat = null)
{
this.image = image;
this.imgFormat = imgFormat ?? ImageFormat.Png;
this.Options = new Dictionary<string, string> {
{ HttpHeaders.ContentType, this.imgFormat.ToImageMimeType() }
};
}
public void WriteTo(Stream responseStream)
{
using (var ms = new MemoryStream())
{
image.Save(ms, imgFormat);
ms.WriteTo(responseStream);
}
}
public void Dispose()
{
this.image.Dispose();
}
public IDictionary<string, string> Options { get; set; }
}
Which you can return in your Service with:
public object Any(ImageAsCustomResult request)
{
var image = new Bitmap(100, 100);
using (var g = Graphics.FromImage(image))
{
g.Clear(request.Format.ToImageColor());
return new ImageResult(image, request.Format.ToImageFormat());
}
}
I am implementing a REST service using ServiceStack. We use the repository pattern and auto-wire repositories into services via IoC.
Currently, we have a naive approach where one db model is paired with one repository. This means that whenever more than one entity is manipulated in one service, no transactional boundaries are used. Repositories are invoked sequentially: if one or more steps along the way fail, one has to roll back the db to its initial state manually. Worst-case scenario, if the request thread dies, or if an unchecked exception occurs (e.g., an OutOfMemoryException), the database will be left in an inconsistent state.
I have a set of hypothetical solutions, but I regard none as adequate:
Open a connection and start a transaction at the service level, then invoke repositories, passing them the connection. This is obviously wrong, as it goes against all the DDD design guidelines: the whole point is for the upper layers to be completely ignorant of concrete persistence. Moreover, it would mess up unit testing.
Have the first repository start a transaction, and pass the already opened connection to the other repositories it depends on. This also sounds like bad design.
Define aggregates. I'm not a great fan of this one, as I'm not a domain-modelling expert, and I feel that by introducing aggregates I am liable to introduce design errors. One advantage of the current model is that it is simple.
Does anyone have suggestions for this problem?
Thanks in advance.
You can use a pass-through class, usually called UnitOfWork, where you open and close the "connection". Search for "unit of work" and you will find many examples. You can customize the snippet below to include transactions.
public class UnitOfWork : IUnitOfWork
{
readonly CompanyDbContext _context;
public UnitOfWork()
{
_context = new CompanyDbContext ();
}
private bool _disposed;
protected virtual void Dispose(bool disposing)
{
if (!_disposed)
{
if (disposing)
{
_context.Dispose();
}
}
_disposed = true;
}
public void Dispose()
{
Dispose(true);
GC.SuppressFinalize(this);
}
public void Save()
{
_context.SaveChanges();
}
public IProductRepository ProductRepository
{
get { return new ProductRepository(_context); }
}
public ICartRepository CartRepository
{
get { return new CartRepository(_context); }
}
}
Then you can perform multiple operations in a single unit, like below:
using (_unitOfWork)
{
var p = _unitOfWork.ProductRepository.SingleOrDefault(a => a.id == 1);
_unitOfWork.CartRepository.Add(p);
_unitOfWork.Save();
}
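To actually include transactions, as mentioned above, one possible extension is an explicit transaction wrapper on the unit of work (a sketch assuming EF 6's DbContextTransaction API; the method name is my own):
// Sketch: run several repository operations as one atomic unit (EF 6).
public void ExecuteInTransaction(Action work)
{
    using (var transaction = _context.Database.BeginTransaction())
    {
        try
        {
            work();
            _context.SaveChanges();
            transaction.Commit();
        }
        catch
        {
            transaction.Rollback(); // nothing is persisted if any step fails
            throw;
        }
    }
}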
In order to use transactions effectively with OrmLite, you need to create a custom DBManager class that can hold the connection object for each thread (use a [ThreadStatic] field). Then you can use this custom DBManager in your repositories to call the various OrmLite functions.
Part of the code that I use is below (you will need to modify it to work properly in your project):
public class ThreadSpecificDBManager : IDisposable, IDBManager
{
[ThreadStatic]
private static int connectionCount = 0;
[ThreadStatic]
private static int transactionCount = 0;
[ThreadStatic]
private static IDbConnection connection = null;
public string ConnectionString { get; set; }
public IDbConnection Connection { get { EnsureOpenConnection(); return connection; } }
static ThreadSpecificDBManager()
{
}
private void EnsureOpenConnection()
{
if ((connection == null) || (connection.State == ConnectionState.Closed))
{
OrmLiteConfig.TSTransaction = null;
transactionCount = 0;
connectionCount = 0;
connection = (DbConnection)ConnectionString.OpenDbConnection();
//if (ConfigBase.EnableWebProfiler == true)
// connection = new ProfiledDbConnection((DbConnection)connection, MiniProfiler.Current);
}
}
public ThreadSpecificDBManager(string connectionString)
{
ConnectionString = connectionString;
connectionCount++;
EnsureOpenConnection();
}
public void Dispose()
{
if (transactionCount > 0)
{
//Log.Error("Uncommitted Transaction is left");
}
connectionCount--;
if (connectionCount < 1)
{
if ((connection != null) && (connection.State == ConnectionState.Open))
connection.Close();
if (connection != null)
connection.Dispose();
connection = null;
}
}
public void BeginTransaction()
{
if (transactionCount == 0)
{
//Log.SqlBeginTransaction(0, true);
OrmLiteConfig.TSTransaction = Connection.BeginTransaction();
}
else
{
//Log.SqlBeginTransaction(transactionCount, false);
}
transactionCount = transactionCount + 1;
}
public void RollbackTransaction()
{
try
{
if (transactionCount == 0)
{
//Log.SqlError("Transaction Rollback called without a begin transaction call.");
return;
}
if (OrmLiteConfig.TSTransaction == null)
{
//Log.SqlError("Transaction is not saved in the Thread Safe variable- so it cannot be rollbacked.");
throw new Exception("Transaction is not saved in the Thread Safe variable- so it cannot be rollbacked.");
}
if (transactionCount == 1)
{
transactionCount = 0;
try
{
//Log.SqlRollbackTransaction(transactionCount, true);
OrmLiteConfig.TSTransaction.Rollback();
}
catch (Exception ex1)
{
//Log.SqlError(ex1,"Error when rolling back the transaction");
}
OrmLiteConfig.TSTransaction.Dispose();
OrmLiteConfig.TSTransaction = null;
}
else
{
//Log.SqlRollbackTransaction(transactionCount, false);
transactionCount = transactionCount - 1;
}
}
finally
{
}
}
public void CommitTransaction()
{
try
{
if (transactionCount == 0)
{
//Log.SqlError("Transaction Rollback called without a begin transaction call.");
return;
}
if (OrmLiteConfig.TSTransaction == null)
{
//Log.SqlError("Transaction is not saved in the Thread Safe variable- so it cannot be rollbacked.");
throw new Exception("Transaction is not saved in the Thread Safe variable- so it cannot be rollbacked.");
}
if (transactionCount == 1)
{
//Log.SqlCommitTransaction(transactionCount,true);
transactionCount = 0;
OrmLiteConfig.TSTransaction.Commit();
OrmLiteConfig.TSTransaction.Dispose();
OrmLiteConfig.TSTransaction = null;
}
else
{
//Log.SqlCommitTransaction(transactionCount, false);
transactionCount = transactionCount - 1 ;
}
}
finally
{
}
}
}
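A sketch of how calling code might consume this class; the connection string and the repository work inside the scope are placeholders:
// Nested scopes on the same thread share the connection and transaction counters.
using (var db = new ThreadSpecificDBManager(connectionString))
{
    db.BeginTransaction();
    try
    {
        // ... repository calls that use db.Connection via OrmLite ...
        db.CommitTransaction();
    }
    catch
    {
        db.RollbackTransaction();
        throw;
    }
}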