public void CommitOffsets()
{
    this.EnsuresNotDisposed();
    if (this.zkClient == null)
    {
        return;
    }

    foreach (KeyValuePair<string, IDictionary<Partition, PartitionTopicInfo>> topic in topicRegistry)
    {
        var topicDirs = new ZKGroupTopicDirs(this.config.GroupId, topic.Key);
        foreach (KeyValuePair<Partition, PartitionTopicInfo> partition in topic.Value)
        {
            var newOffset = partition.Value.GetConsumeOffset();
            try
            {
                ZkUtils.UpdatePersistentPath(
                    zkClient,
                    topicDirs.ConsumerOffsetDir + "/" + partition.Value.Partition.Name,
                    newOffset.ToString());
            }
            catch (Exception ex)
            {
                Logger.WarnFormat(CultureInfo.CurrentCulture, "Exception during CommitOffsets: {0}", ex);
            }

            if (Logger.IsDebugEnabled)
            {
                Logger.DebugFormat(
                    CultureInfo.CurrentCulture,
                    "Committed offset {0} for topic {1} partition {2}",
                    newOffset,
                    topic.Key,
                    partition.Value.Partition.Name);
            }
        }
    }
}
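
/*
 * A minimal sketch, not part of the connector: writing a single offset by hand with the
 * same ZkUtils/ZKGroupTopicDirs helpers that CommitOffsets uses above. The method name and
 * the group, topic, and partition values below are hypothetical placeholders; the
 * "brokerId-partitionId" partition name mirrors the Partition.Name values committed above.
 */
private void CommitSingleOffsetSketch(long offset)
{
    var dirs = new ZKGroupTopicDirs("example-group", "example-topic");

    // ConsumerOffsetDir is the group's offsets directory for the topic in ZooKeeper;
    // UpdatePersistentPath creates the node if it is missing, otherwise it updates the value.
    ZkUtils.UpdatePersistentPath(this.zkClient, dirs.ConsumerOffsetDir + "/0-0", offset.ToString());
}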
private void CloseFetchersForQueues(
    Cluster cluster,
    IDictionary<string, IList<KafkaStream<TKey, TValue>>> messageStreams,
    IEnumerable<BlockingCollection<FetchedDataChunk>> queuesToBeCleared)
{
    var allPartitionInfos = parent.topicRegistry.Values.SelectMany(p => p.Values).ToList();
    if (parent.fetcher != null)
    {
        parent.fetcher.StopConnections();
        ClearFetcherQueues(allPartitionInfos, cluster, queuesToBeCleared, messageStreams);
        Logger.Info("Committing all offsets after clearing the fetcher queues");

        /*
         * Here we need to commit offsets before stopping the consumer from returning any more
         * messages from the current data chunk. Since partition ownership has not yet been
         * released, this commit ensures that the offsets committed now will be used by the next
         * consumer thread that owns the partition for the current data chunk. The fetchers are
         * already shut down and this is the last chunk to be iterated by the consumer, so no
         * more messages will be returned by this iterator until the rebalancing finishes
         * successfully and the fetchers restart to fetch more data chunks.
         */
        if (parent.Config.AutoCommitEnable)
        {
            parent.CommitOffsets();
        }
    }
}
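
/*
 * A minimal, self-contained sketch (not part of this client) of the queue-draining step that
 * ClearFetcherQueues performs after the fetchers stop: each BlockingCollection is emptied with
 * TryTake so that no stale chunks fetched before the rebalance are handed to the streams. The
 * method name is hypothetical; only the element type and the queue collection mirror the
 * parameters of CloseFetchersForQueues above.
 */
private static void DrainQueuesSketch(IEnumerable<BlockingCollection<FetchedDataChunk>> queuesToBeCleared)
{
    foreach (var queue in queuesToBeCleared)
    {
        // Once the fetchers are stopped nothing refills the queues, so this loop terminates.
        while (queue.TryTake(out FetchedDataChunk discarded))
        {
            // Discard chunks fetched before the rebalance; offsets for data already handed to
            // the consumer are committed separately via parent.CommitOffsets().
        }
    }
}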