Bend.Repl.ReplHandler.worker_logResume — C# (CSharp) method

worker_logResume() is a private instance method that takes no parameters and returns void.

Signature: private void worker_logResume()
        /// <summary>
        /// Background worker step: decide whether this replica can catch up by
        /// replaying logs from a randomly chosen seed server, or whether it needs
        /// a full rebuild. Steps (matching the numbered comments below):
        ///  (0) verify the seed's data-instance-id matches ours (go to error state if not)
        ///  (1) compare our per-server log status against the seed's to decide if
        ///      log replay is possible
        ///  (3) lazily create a ReplLogFetcher for every remote log we must follow
        ///  (4) if every fetcher reports caught-up, transition to the active state
        /// </summary>
        private void worker_logResume()
        {
            bool can_log_replay = true;
            bool need_resolve = false;
            IReplConnection srvr;
            List<string> rebuild_reasons = new List<string>();
            try {
                srvr = pusher.getRandomSeed();
            } catch (ReplPusher.NoServersAvailableException) {
                // TODO: How will the first resume decide he is current enough to go active if there is
                //   no Seed?
                Console.WriteLine("Repl({0}): no servers available for log resume...",
                    ctx.server_guid);
                return;
            }
            // (0) check that our data-instance-ids match

            if (srvr.getDataInstanceId().CompareTo(this.data_instance_id) != 0) {
                // the seed belongs to a different data instance; we cannot replicate from it
                this.state = ReplState.error;
                return;
            }

            // (1) see if we can resume from our commit_head pointers
            var our_log_status_dict = new Dictionary<string, LogStatus>();
            List<LogStatus> client_log_status = this.getStatusForLogs().ToList();

            Console.WriteLine("worker_logResume({0}) - ourlogs: {1}",
                ctx.server_guid, String.Join(",", client_log_status));

            foreach (var ls in client_log_status) {
                our_log_status_dict[ls.server_guid] = ls;
            }

            List<LogStatus> srvr_log_status = srvr.getStatusForLogs().ToList();
            Console.WriteLine("worker_logResume({0}) - serverlogs({1}): {2}",
                ctx.server_guid, srvr.getServerGuid(),
                String.Join(",", srvr_log_status));
            foreach (var ls in srvr_log_status) {
                LogStatus our_ls;
                // single TryGetValue lookup instead of ContainsKey + indexer (CA1854)
                if (!our_log_status_dict.TryGetValue(ls.server_guid, out our_ls)) {
                    // we are missing an entire log...
                    if (ls.oldest_entry_pointer.GetLong().Equals(0)) {
                        // it's the magic start of log pointer, so we can resume from it
                    } else {
                        // otherwise, we need a full rebuild!
                        rebuild_reasons.Add(String.Format("we are entirely missing log data: {0}", ls));
                        can_log_replay = false;
                    }
                } else {
                    // if our log_head is before their oldest_entry, we need a full rebuild!
                    if (our_ls.log_commit_head.CompareTo(ls.oldest_entry_pointer) < 0) {
                        rebuild_reasons.Add(String.Format("log:{0}, our log_head:{1} < their oldest:{2}",
                            ls.server_guid, our_ls.log_commit_head, ls.oldest_entry_pointer));
                        can_log_replay = false;
                    }
                    if (our_ls.log_commit_head.CompareTo(ls.log_commit_head) > 0) {
                        // we have newer log entries than they do for at least one log!!
                        rebuild_reasons.Add(String.Format("log:{0}, our log_head:{1} > their head:{2} need resolve",
                            ls.server_guid, our_ls.log_commit_head, ls.log_commit_head));

                        need_resolve = true;
                    }

                }

            }

            if (!can_log_replay) {

                if (!need_resolve) {
                    // stop all the fetchers!
                    this._stopFetchers();

                    // schedule a full rebuild
                    Console.WriteLine("Repl({0}) logs don't match, we need a full rebuild. Reasons: {1}",
                        ctx.server_guid, String.Join(",", rebuild_reasons));
                    this.state = ReplState.rebuild;
                    return;
                } else {
                    // TODO: do we really need to do anything?
                    Console.WriteLine("Repl({0}) our log has newer changes than somebody, we expect he'll resolve with us. Reasons: {1}",
                        ctx.server_guid, String.Join(",", rebuild_reasons));
                    // this.state = ReplState.resolve;
                    // return;
                }
            }

            bool all_caught_up = true;
            // (3) make sure we have a fetcher for every log_server_guid
            // (4) check to see if we're caught up on all logs

            foreach (var ls in srvr_log_status) {
                if (ls.server_guid.Equals(this.getServerGuid())) {
                    // don't try to fetch entries for our own log.
                    // TODO: check for agreement about our log entries.
                    continue;
                }

                // make sure we have a fetcher (created lazily), under the lock
                // that guards the fetcher map
                lock (this.fetcher_for_logserverguid) {
                    ReplLogFetcher fetcher;
                    // single TryGetValue lookup instead of ContainsKey + two indexer hits
                    if (!this.fetcher_for_logserverguid.TryGetValue(ls.server_guid, out fetcher)) {
                        fetcher = new ReplLogFetcher(this, ls.server_guid);
                        this.fetcher_for_logserverguid[ls.server_guid] = fetcher;
                    }
                    if (!fetcher.IsCaughtUp) {
                        all_caught_up = false;
                    }
                }
            }

            // if we're all caught up, and we're currently not active, make us active!!
            if (all_caught_up && (this.state != ReplState.active)) {
                Console.WriteLine("** Server {0} becoming ACTIVE!!", ctx.server_guid);
                state = ReplState.active;  // we are up to date and online!!
            }

            // TODO: if we're NOT all caught up, we should go back to inactive!
        }