github.com/igggame/nebulas-go@v2.1.0+incompatible/nbre/core/net_ipc/client/nipc_client.cpp

// Copyright (C) 2018 go-nebulas authors
//
// This file is part of the go-nebulas library.
//
// the go-nebulas library is free software: you can redistribute it and/or
// modify it under the terms of the GNU General Public License as published
// by the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// the go-nebulas library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with the go-nebulas library.  If not, see
// <http://www.gnu.org/licenses/>.
//
#include "core/net_ipc/client/nipc_client.h"
#include "common/configuration.h"
#include "common/exception_queue.h"
#include "core/net_ipc/nipc_pkg.h"

namespace neb {
namespace core {
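// nipc_client owns the TCP connection to the NBRE IPC counterpart: it wires
// the registered package handlers into a typed_pkg_hub, runs the network
// loop on a dedicated thread, and exchanges heartbeat messages to detect a
// dead peer.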
nipc_client::nipc_client() : m_handling_pkg_num(0) {
  m_pkg_handler_thread = std::make_unique<util::wakeable_thread>();
}

nipc_client::~nipc_client() {
  LOG(INFO) << "to destroy nipc client";
  shutdown();
}

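// start() spawns the network thread and blocks the caller until either the
// TCP connection is established or starting the ipc fails with an exception.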
bool nipc_client::start() {

  if (m_handlers.empty()) {
    LOG(INFO) << "no handlers registered, nothing to start";
    return false;
  }

  bool init_done = false;
  std::mutex local_mutex;
  std::condition_variable local_cond_var;

  m_thread = std::make_unique<std::thread>([&, this]() {
    try {
      m_got_exception_when_start_ipc = false;

      ::ff::net::net_nervure nn;
      ::ff::net::typed_pkg_hub hub;
      std::for_each(
          m_handlers.begin(), m_handlers.end(),
          [&hub](const std::function<void(::ff::net::typed_pkg_hub &)> &f) {
            f(hub);
          });

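      // heartbeat bookkeeping: the counter goes up by one for every
      // heartbeat sent and down by one for every heartbeat received, so a
      // value above 2 means the peer has stopped answering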
      m_to_recv_heart_beat_msg = 0;

      hub.to_recv_pkg<heart_beat_t>([this](std::shared_ptr<heart_beat_t>) {
        m_to_recv_heart_beat_msg--;
      });

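      // once the connection is up, flag success and wake the caller blocked
      // in start()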
      nn.get_event_handler()->listen<::ff::net::event::tcp_get_connection>(
          [&, this](::ff::net::tcp_connection_base *) {
            LOG(INFO) << "got connection";
            m_is_connected = true;
            {
              std::lock_guard<std::mutex> _l(local_mutex);
              init_done = true;
            }
            local_cond_var.notify_one();
          });
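      // a lost connection stops the network loop and is surfaced to the rest
      // of the system through the exception queue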
      nn.get_event_handler()->listen<::ff::net::event::tcp_lost_connection>(
          [this, &nn](::ff::net::tcp_connection_base *) {
            LOG(INFO) << "lost connection";
            m_is_connected = false;
            nn.stop();
            LOG(INFO) << "nn stopped";
            ff::abort_all_tasks_and_quit();
            LOG(INFO) << "ff done";
            exception_queue::instance().push_back(
                neb_exception::neb_std_exception, "lost connection");
          });
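      // register the package hub with the nervure and connect to the
      // endpoint taken from the global configuration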
      nn.add_pkg_hub(hub);
      m_conn = nn.add_tcp_client(configuration::instance().nipc_listen(),
                                 configuration::instance().nipc_port());

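      // every 3 seconds: if more than 2 heartbeats are still unanswered and
      // no package is being handled, treat the peer as dead and close the
      // connection; otherwise send the next heartbeat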
      m_heart_bear_timer = std::make_unique<util::timer_loop>(&nn.ioservice());
      m_heart_bear_timer->register_timer_and_callback(3, [this]() {
        if (m_to_recv_heart_beat_msg > 2 && m_handling_pkg_num == 0) {
          LOG(INFO) << "no heart beat msg, to close";
          m_conn->close();
          return;
        }
        m_to_recv_heart_beat_msg++;
        std::shared_ptr<heart_beat_t> hb = std::make_shared<heart_beat_t>();
        m_conn->send(hb);
      });

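      // keep the io service running, resetting it after an exception thrown
      // by a handler, until it is stopped deliberately (e.g. on lost
      // connection)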
      while (true) {
        if (nn.ioservice().stopped()) {
          LOG(INFO) << "ioservice stopped, leaving network loop";
          break;
        }
        try {
          nn.run();
        } catch (...) {
          LOG(INFO) << "to reset ioservice";
          nn.ioservice().reset();
        }
      }
    } catch (const std::exception &e) {
      LOG(ERROR) << "got exception when starting ipc, " << typeid(e).name()
                 << ", " << e.what();
      // set init_done under the lock so the waiting thread cannot miss this
      // notification
      std::lock_guard<std::mutex> _l(local_mutex);
      m_got_exception_when_start_ipc = true;
      init_done = true;
      local_cond_var.notify_one();
    } catch (...) {
      LOG(ERROR) << "got unknown exception when starting ipc";
      std::lock_guard<std::mutex> _l(local_mutex);
      m_got_exception_when_start_ipc = true;
      init_done = true;
      local_cond_var.notify_one();
    }
  });
  std::unique_lock<std::mutex> _l(local_mutex);
  if (!init_done) {
    LOG(INFO) << "wait for init done cond var";
  }
  // a predicate guards against spurious wakeups and against a notification
  // that fired before the wait started
  local_cond_var.wait(_l, [&]() { return init_done; });
  if (m_got_exception_when_start_ipc) {
    LOG(INFO) << "got exception when client started ipc";
    return false;
  }
  return true;
}

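// shutdown() closes the connection, which triggers the lost-connection
// handler and stops the network loop, then joins the network thread.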
void nipc_client::shutdown() {
  LOG(INFO) << "to shutdown nipc client";
  if (m_conn) {
    m_conn->close();
  }
  if (m_thread) {
    m_thread->join();
    m_thread.reset();
  }
}
} // namespace core
} // namespace neb