- Fix peer timer calling refreshEncryptionState() instead of refreshPeerInfo(), so the Network tab now auto-updates every 5s - Reorder RPC error handling so warmup messages (Loading block index, Verifying blocks, etc.) display in the status bar instead of being masked by the generic "Waiting for dragonxd" message
1727 lines
71 KiB
C++
// DragonX Wallet - ImGui Edition
|
|
// Copyright 2024-2026 The Hush Developers
|
|
// Released under the GPLv3
|
|
//
|
|
// app_network.cpp — RPC connection, data refresh, and network operations.
|
|
// Split from app.cpp for maintainability.
|
|
//
|
|
// Connection state machine:
|
|
//
|
|
// [Disconnected]
|
|
// │
|
|
// ▼ tryConnect() every 5s
|
|
// Auto-detect DRAGONX.conf (host, port, rpcuser, rpcpassword)
|
|
// │
|
|
// ├─ no config found ──► start embedded daemon ──► retry
|
|
// │
|
|
// ▼ post async rpc_->connect() to worker_
|
|
// [Connecting]
|
|
// │
|
|
// ├─ success ──► onConnected() ──► [Connected]
|
|
// │ │
|
|
// │ ▼ refreshData() every 5s
|
|
// │ [Running]
|
|
// │ │
|
|
// │ ├─ RPC error ──► onDisconnected()
|
|
// │ │ │
|
|
// ├─ auth 401 ──► .cookie auth ──► retry│ ▼
|
|
// │ │ [Disconnected]
|
|
// └─ failure ──► onDisconnected(reason) ┘
|
|
// may restart daemon
|
|
|
|
#include "app.h"
|
|
#include "rpc/rpc_client.h"
|
|
#include "rpc/rpc_worker.h"
|
|
#include "rpc/connection.h"
|
|
#include "config/settings.h"
|
|
#include "daemon/embedded_daemon.h"
|
|
#include "daemon/xmrig_manager.h"
|
|
#include "ui/notifications.h"
|
|
#include "default_banlist_embedded.h"
|
|
#include "util/platform.h"
|
|
#include "util/perf_log.h"
|
|
|
|
#include <nlohmann/json.hpp>
|
|
#include <curl/curl.h>
|
|
#include <fstream>
|
|
|
|
namespace dragonx {
|
|
|
|
using json = nlohmann::json;
|
|
|
|
// ============================================================================
|
|
// Connection Management
|
|
// ============================================================================
|
|
|
|
void App::tryConnect()
|
|
{
|
|
if (connection_in_progress_) return;
|
|
|
|
static int connect_attempt = 0;
|
|
++connect_attempt;
|
|
|
|
connection_in_progress_ = true;
|
|
connection_status_ = "Loading configuration...";
|
|
|
|
// Auto-detect configuration (file I/O — fast, safe on main thread)
|
|
auto config = rpc::Connection::autoDetectConfig();
|
|
|
|
if (config.rpcuser.empty() || config.rpcpassword.empty()) {
|
|
connection_in_progress_ = false;
|
|
std::string confPath = rpc::Connection::getDefaultConfPath();
|
|
VERBOSE_LOGF("[connect #%d] No valid config — DRAGONX.conf missing or no rpcuser/rpcpassword (looked at: %s)\n",
|
|
connect_attempt, confPath.c_str());
|
|
|
|
// If we already know an external daemon is on the port, just wait
|
|
// for the config file to appear (the daemon creates it on first run).
|
|
if (embedded_daemon_ && embedded_daemon_->externalDaemonDetected()) {
|
|
connection_status_ = "Waiting for daemon config...";
|
|
VERBOSE_LOGF("[connect #%d] External daemon detected on port, waiting for config file to appear\n", connect_attempt);
|
|
core_timer_ = CORE_INTERVAL_DEFAULT - 1.0f;
|
|
return;
|
|
}
|
|
|
|
connection_status_ = "No DRAGONX.conf found";
|
|
|
|
// Try to start embedded daemon if enabled
|
|
if (use_embedded_daemon_ && !isEmbeddedDaemonRunning()) {
|
|
connection_status_ = "Starting dragonxd...";
|
|
if (startEmbeddedDaemon()) {
|
|
// Will retry connection after daemon starts
|
|
VERBOSE_LOGF("[connect #%d] Embedded daemon starting, will retry connection...\n", connect_attempt);
|
|
core_timer_ = CORE_INTERVAL_DEFAULT - 1.0f;
|
|
} else if (embedded_daemon_ && embedded_daemon_->externalDaemonDetected()) {
|
|
connection_status_ = "Waiting for daemon config...";
|
|
VERBOSE_LOGF("[connect #%d] External daemon detected but no config yet, will retry...\n", connect_attempt);
|
|
core_timer_ = CORE_INTERVAL_DEFAULT - 1.0f;
|
|
} else {
|
|
VERBOSE_LOGF("[connect #%d] startEmbeddedDaemon() failed — lastError: %s, binary: %s\n",
|
|
connect_attempt,
|
|
embedded_daemon_ ? embedded_daemon_->getLastError().c_str() : "(no daemon object)",
|
|
daemon::EmbeddedDaemon::findDaemonBinary().c_str());
|
|
}
|
|
} else if (!use_embedded_daemon_) {
|
|
VERBOSE_LOGF("[connect #%d] Embedded daemon disabled (using external). No config found at %s\n",
|
|
connect_attempt, confPath.c_str());
|
|
}
|
|
return;
|
|
}
|
|
|
|
connection_status_ = "Connecting to dragonxd...";
|
|
VERBOSE_LOGF("[connect #%d] Connecting to %s:%s (user=%s)\n",
|
|
connect_attempt, config.host.c_str(), config.port.c_str(), config.rpcuser.c_str());
|
|
|
|
// Run the blocking rpc_->connect() on the worker thread so the UI
|
|
// stays responsive (curl connect timeout can be up to 10 seconds).
|
|
if (!worker_) {
|
|
connection_in_progress_ = false;
|
|
VERBOSE_LOGF("[connect #%d] No worker thread available!\n", connect_attempt);
|
|
return;
|
|
}
|
|
|
|
// Capture daemon state before posting to worker
|
|
bool daemonStarting = embedded_daemon_ &&
|
|
(embedded_daemon_->getState() == daemon::EmbeddedDaemon::State::Starting ||
|
|
embedded_daemon_->getState() == daemon::EmbeddedDaemon::State::Running);
|
|
bool externalDetected = embedded_daemon_ && embedded_daemon_->externalDaemonDetected();
|
|
int attempt = connect_attempt;
|
|
|
|
// Log detailed daemon state for diagnostics
|
|
if (embedded_daemon_) {
|
|
const char* stateStr = "unknown";
|
|
switch (embedded_daemon_->getState()) {
|
|
case daemon::EmbeddedDaemon::State::Stopped: stateStr = "Stopped"; break;
|
|
case daemon::EmbeddedDaemon::State::Starting: stateStr = "Starting"; break;
|
|
case daemon::EmbeddedDaemon::State::Running: stateStr = "Running"; break;
|
|
case daemon::EmbeddedDaemon::State::Stopping: stateStr = "Stopping"; break;
|
|
case daemon::EmbeddedDaemon::State::Error: stateStr = "Error"; break;
|
|
}
|
|
VERBOSE_LOGF("[connect #%d] Daemon state: %s, running: %s, external: %s, crashes: %d, lastErr: %s\n",
|
|
attempt, stateStr,
|
|
embedded_daemon_->isRunning() ? "yes" : "no",
|
|
externalDetected ? "yes" : "no",
|
|
embedded_daemon_->getCrashCount(),
|
|
embedded_daemon_->getLastError().empty() ? "(none)" : embedded_daemon_->getLastError().c_str());
|
|
} else {
|
|
VERBOSE_LOGF("[connect #%d] No embedded daemon object (use_embedded=%s)\n",
|
|
attempt, use_embedded_daemon_ ? "yes" : "no");
|
|
}
|
|
|
|
worker_->post([this, config, daemonStarting, externalDetected, attempt]() -> rpc::RPCWorker::MainCb {
|
|
bool connected = rpc_->connect(config.host, config.port, config.rpcuser, config.rpcpassword);
|
|
std::string connectErr = rpc_->getLastConnectError();
|
|
|
|
return [this, config, connected, daemonStarting, externalDetected, attempt, connectErr]() {
|
|
if (connected) {
|
|
VERBOSE_LOGF("[connect #%d] Connected successfully\n", attempt);
|
|
saved_config_ = config; // save for fast-lane connection
|
|
onConnected();
|
|
} else {
|
|
// HTTP 401 = authentication failure. The daemon is running
|
|
// but our rpcuser/rpcpassword don't match. Don't retry
|
|
// endlessly — tell the user what's wrong.
|
|
bool authFailure = (connectErr.find("401") != std::string::npos);
|
|
if (authFailure) {
|
|
// Try .cookie auth as fallback — the daemon may have
|
|
// generated a .cookie file instead of using DRAGONX.conf credentials
|
|
std::string dataDir = rpc::Connection::getDefaultDataDir();
|
|
std::string cookieUser, cookiePass;
|
|
if (rpc::Connection::readAuthCookie(dataDir, cookieUser, cookiePass)) {
|
|
VERBOSE_LOGF("[connect #%d] HTTP 401 — retrying with .cookie auth from %s\n",
|
|
attempt, dataDir.c_str());
|
|
worker_->post([this, config, cookieUser, cookiePass, attempt]() -> rpc::RPCWorker::MainCb {
|
|
auto cookieConfig = config;
|
|
cookieConfig.rpcuser = cookieUser;
|
|
cookieConfig.rpcpassword = cookiePass;
|
|
bool ok = rpc_->connect(cookieConfig.host, cookieConfig.port, cookieConfig.rpcuser, cookieConfig.rpcpassword);
|
|
return [this, cookieConfig, ok, attempt]() {
|
|
connection_in_progress_ = false;
|
|
if (ok) {
|
|
VERBOSE_LOGF("[connect #%d] Connected via .cookie auth\n", attempt);
|
|
saved_config_ = cookieConfig;
|
|
onConnected();
|
|
} else {
|
|
state_.connected = false;
|
|
connection_status_ = "Auth failed — check rpcuser/rpcpassword";
|
|
VERBOSE_LOGF("[connect #%d] .cookie auth also failed\n", attempt);
|
|
ui::Notifications::instance().error(
|
|
"RPC authentication failed (HTTP 401). "
|
|
"The rpcuser/rpcpassword in DRAGONX.conf don't match the running daemon. "
|
|
"Restart the daemon or correct the credentials.");
|
|
}
|
|
};
|
|
});
|
|
return; // async retry in progress
|
|
}
|
|
state_.connected = false;
|
|
std::string confPath = rpc::Connection::getDefaultConfPath();
|
|
connection_status_ = "Auth failed — check rpcuser/rpcpassword";
|
|
VERBOSE_LOGF("[connect #%d] HTTP 401 — rpcuser/rpcpassword in %s don't match the daemon. "
|
|
"Edit the file or restart the daemon to regenerate credentials.\n",
|
|
attempt, confPath.c_str());
|
|
ui::Notifications::instance().error(
|
|
"RPC authentication failed (HTTP 401). "
|
|
"The rpcuser/rpcpassword in DRAGONX.conf don't match the running daemon. "
|
|
"Restart the daemon or correct the credentials.");
|
|
} else if (connectErr.find("Loading") != std::string::npos ||
|
|
connectErr.find("Verifying") != std::string::npos ||
|
|
connectErr.find("Activating") != std::string::npos ||
|
|
connectErr.find("Rewinding") != std::string::npos ||
|
|
connectErr.find("Rescanning") != std::string::npos ||
|
|
connectErr.find("Pruning") != std::string::npos) {
|
|
// Daemon is reachable but still in warmup (Loading block index, etc.)
|
|
// Check this BEFORE daemonStarting so the actual warmup status is shown.
|
|
state_.connected = false;
|
|
connection_status_ = connectErr;
|
|
VERBOSE_LOGF("[connect #%d] Daemon warmup: %s\n", attempt, connectErr.c_str());
|
|
core_timer_ = CORE_INTERVAL_DEFAULT - 1.0f;
|
|
} else if (daemonStarting) {
|
|
state_.connected = false;
|
|
// Show the actual RPC error alongside the waiting message so
|
|
// auth mismatches and timeouts aren't silently hidden.
|
|
if (!connectErr.empty()) {
|
|
connection_status_ = "Waiting for dragonxd — " + connectErr;
|
|
} else {
|
|
connection_status_ = "Waiting for dragonxd to start...";
|
|
}
|
|
VERBOSE_LOGF("[connect #%d] RPC connection failed (%s) — daemon still starting, will retry...\n",
|
|
attempt, connectErr.c_str());
|
|
core_timer_ = CORE_INTERVAL_DEFAULT - 1.0f;
|
|
} else if (externalDetected) {
|
|
} else {
|
|
onDisconnected("Connection failed");
|
|
VERBOSE_LOGF("[connect #%d] RPC connection failed — no daemon starting, no external detected\n", attempt);
|
|
|
|
if (use_embedded_daemon_ && !isEmbeddedDaemonRunning()) {
|
|
// Prevent infinite crash-restart loop
|
|
if (embedded_daemon_ && embedded_daemon_->getCrashCount() >= 3) {
|
|
connection_status_ = "Daemon crashed " + std::to_string(embedded_daemon_->getCrashCount()) + " times";
|
|
VERBOSE_LOGF("[connect #%d] Daemon crashed %d times — not restarting (use Settings > Restart Daemon to retry)\n",
|
|
attempt, embedded_daemon_->getCrashCount());
|
|
} else {
|
|
connection_status_ = "Starting dragonxd...";
|
|
if (startEmbeddedDaemon()) {
|
|
VERBOSE_LOGF("[connect #%d] Embedded daemon starting, will retry connection...\n", attempt);
|
|
} else if (embedded_daemon_ && embedded_daemon_->externalDaemonDetected()) {
|
|
connection_status_ = "Connecting to daemon...";
|
|
VERBOSE_LOGF("[connect #%d] External daemon detected, will connect via RPC...\n", attempt);
|
|
} else {
|
|
VERBOSE_LOGF("[connect #%d] Failed to start embedded daemon — lastError: %s\n",
|
|
attempt,
|
|
embedded_daemon_ ? embedded_daemon_->getLastError().c_str() : "(no daemon object)");
|
|
}
|
|
}
|
|
} else if (!use_embedded_daemon_) {
|
|
VERBOSE_LOGF("[connect #%d] Embedded daemon disabled — external daemon at %s:%s not responding\n",
|
|
attempt, config.host.c_str(), config.port.c_str());
|
|
} else {
|
|
VERBOSE_LOGF("[connect #%d] Embedded daemon is running but RPC failed — daemon may be initializing\n", attempt);
|
|
}
|
|
}
|
|
}
|
|
connection_in_progress_ = false;
|
|
};
|
|
});
|
|
}
|
|
|
|
void App::onConnected()
|
|
{
|
|
state_.connected = true;
|
|
connection_status_ = "Connected";
|
|
|
|
// Reset crash counter on successful connection
|
|
if (embedded_daemon_) {
|
|
embedded_daemon_->resetCrashCount();
|
|
}
|
|
|
|
// Get daemon info + wallet encryption state on the worker thread.
|
|
// Fetching getwalletinfo here (before refreshData) ensures the lock
|
|
// screen appears immediately instead of after 6+ queued RPC calls.
|
|
if (worker_ && rpc_) {
|
|
worker_->post([this]() -> rpc::RPCWorker::MainCb {
|
|
json info, walletInfo;
|
|
bool infoOk = false, walletOk = false;
|
|
try {
|
|
info = rpc_->call("getinfo");
|
|
infoOk = true;
|
|
} catch (...) {}
|
|
try {
|
|
walletInfo = rpc_->call("getwalletinfo");
|
|
walletOk = true;
|
|
} catch (...) {}
|
|
return [this, info, walletInfo, infoOk, walletOk]() {
|
|
if (infoOk) {
|
|
try {
|
|
if (info.contains("version"))
|
|
state_.daemon_version = info["version"].get<int>();
|
|
if (info.contains("protocolversion"))
|
|
state_.protocol_version = info["protocolversion"].get<int>();
|
|
if (info.contains("p2pport"))
|
|
state_.p2p_port = info["p2pport"].get<int>();
|
|
if (info.contains("longestchain")) {
|
|
int lc = info["longestchain"].get<int>();
|
|
// Don't regress to 0 — daemon returns 0 when peers haven't been polled
|
|
if (lc > 0) state_.longestchain = lc;
|
|
}
|
|
if (info.contains("notarized"))
|
|
state_.notarized = info["notarized"].get<int>();
|
|
if (info.contains("blocks"))
|
|
state_.sync.blocks = info["blocks"].get<int>();
|
|
// longestchain can lag behind blocks when peer data is stale
|
|
if (state_.longestchain > 0 && state_.sync.blocks > state_.longestchain)
|
|
state_.longestchain = state_.sync.blocks;
|
|
} catch (const std::exception& e) {
|
|
DEBUG_LOGF("[onConnected] getinfo callback error: %s\n", e.what());
|
|
}
|
|
}
|
|
// Apply encryption/lock state immediately so the lock
|
|
// screen shows on the very first frame after connect.
|
|
if (walletOk) {
|
|
try {
|
|
if (walletInfo.contains("unlocked_until")) {
|
|
state_.encrypted = true;
|
|
int64_t until = walletInfo["unlocked_until"].get<int64_t>();
|
|
state_.unlocked_until = until;
|
|
state_.locked = (until == 0);
|
|
} else {
|
|
state_.encrypted = false;
|
|
state_.locked = false;
|
|
state_.unlocked_until = 0;
|
|
}
|
|
state_.encryption_state_known = true;
|
|
} catch (...) {}
|
|
}
|
|
};
|
|
});
|
|
}
|
|
|
|
// onConnected already fetched getwalletinfo — tell refreshData to skip
|
|
// the duplicate call on the very first cycle.
|
|
encryption_state_prefetched_ = true;
|
|
|
|
// Addresses are unknown on fresh connect — force a fetch
|
|
addresses_dirty_ = true;
|
|
|
|
// Start the fast-lane RPC connection (dedicated to 1-second mining polls).
|
|
// Uses its own curl handle + worker thread so getlocalsolps never blocks
|
|
// behind the main refresh batch.
|
|
if (!fast_rpc_) {
|
|
fast_rpc_ = std::make_unique<rpc::RPCClient>();
|
|
}
|
|
if (!fast_worker_) {
|
|
fast_worker_ = std::make_unique<rpc::RPCWorker>();
|
|
fast_worker_->start();
|
|
}
|
|
// Connect on the fast worker's own thread (non-blocking to main)
|
|
fast_worker_->post([this]() -> rpc::RPCWorker::MainCb {
|
|
bool ok = fast_rpc_->connect(saved_config_.host, saved_config_.port,
|
|
saved_config_.rpcuser, saved_config_.rpcpassword);
|
|
return [ok]() {
|
|
if (!ok) {
|
|
DEBUG_LOGF("[FastLane] Failed to connect secondary RPC client\\n");
|
|
} else {
|
|
DEBUG_LOGF("[FastLane] Secondary RPC client connected\\n");
|
|
}
|
|
};
|
|
});
|
|
|
|
// Initial data refresh
|
|
refreshData();
|
|
refreshMarketData();
|
|
|
|
// Apply compiled-in default ban list
|
|
applyDefaultBanlist();
|
|
}
|
|
|
|
void App::onDisconnected(const std::string& reason)
|
|
{
|
|
state_.connected = false;
|
|
state_.clear();
|
|
connection_status_ = reason;
|
|
|
|
// Clear RPC result caches
|
|
viewtx_cache_.clear();
|
|
confirmed_tx_cache_.clear();
|
|
confirmed_tx_ids_.clear();
|
|
confirmed_cache_block_ = -1;
|
|
last_tx_block_height_ = -1;
|
|
|
|
// Tear down the fast-lane connection
|
|
if (fast_worker_) {
|
|
fast_worker_->stop();
|
|
}
|
|
if (fast_rpc_) {
|
|
fast_rpc_->disconnect();
|
|
}
|
|
}
|
|
|
|
// ============================================================================
|
|
// Data Refresh — Tab-Aware Prioritized System
|
|
//
|
|
// Data is split into independent categories, each with its own refresh
|
|
// function, timer, and in-progress guard. The orchestrator (refreshData)
|
|
// dispatches all categories, but each can also be called independently
|
|
// (e.g. on tab switch for immediate refresh).
|
|
//
|
|
// Categories:
|
|
// Core — z_gettotalbalance + getblockchaininfo (balance, sync)
|
|
// Addresses — z_listaddresses + listunspent (address list, per-addr balances)
|
|
// Transactions — listtransactions + z_listreceivedbyaddress + z_viewtransaction
|
|
// Peers — getpeerinfo + listbanned (already standalone)
|
|
// Encryption — getwalletinfo (one-shot on connect)
|
|
//
|
|
// Intervals are adjusted by applyRefreshPolicy() based on the active tab,
|
|
// so the user sees faster updates for the data they're interacting with.
|
|
// ============================================================================
|
|
|
|
// Per-tab refresh cadence, in seconds between refreshes. Braced values
// follow the RefreshIntervals field order declared in app.h (presumably
// core / transactions / addresses / peers — confirm against the header);
// the active tab's primary data gets the shortest interval.
App::RefreshIntervals App::getIntervalsForPage(ui::NavPage page)
{
    using NP = ui::NavPage;
    if (page == NP::Overview) return {2.0f, 10.0f, 15.0f, 0.0f};
    if (page == NP::Send)     return {3.0f, 10.0f, 5.0f,  0.0f};
    if (page == NP::Receive)  return {5.0f, 15.0f, 5.0f,  0.0f};
    if (page == NP::History)  return {5.0f, 3.0f,  15.0f, 0.0f};
    if (page == NP::Mining)   return {5.0f, 15.0f, 15.0f, 0.0f};
    if (page == NP::Peers)    return {5.0f, 15.0f, 15.0f, 5.0f};
    if (page == NP::Market)   return {5.0f, 15.0f, 15.0f, 0.0f};
    // Any other page: relaxed background cadence.
    return {5.0f, 15.0f, 15.0f, 0.0f};
}
|
|
|
|
// Copy the per-tab cadence for `page` into the live interval knobs that
// the frame-update timers compare against.
void App::applyRefreshPolicy(ui::NavPage page)
{
    const auto iv = getIntervalsForPage(page);
    active_core_interval_ = iv.core;
    active_tx_interval_   = iv.transactions;
    active_addr_interval_ = iv.addresses;
    active_peer_interval_ = iv.peers;
}
|
|
|
|
// Switch the active navigation tab. Applies that tab's refresh policy and
// immediately refreshes its priority data (zeroing the matching timer) so
// the user doesn't wait out the previous tab's interval. No-op when the
// page is already active, and refreshes are skipped while disconnected or
// the wallet is locked.
void App::setCurrentPage(ui::NavPage page)
{
    if (current_page_ == page) return;

    current_page_ = page;
    applyRefreshPolicy(page);

    if (!state_.connected || state_.isLocked()) return;

    using NP = ui::NavPage;
    if (page == NP::Overview) {
        refreshCoreData();
        core_timer_ = 0.0f;
    } else if (page == NP::History) {
        transactions_dirty_ = true;
        refreshTransactionData();
        transaction_timer_ = 0.0f;
    } else if (page == NP::Send || page == NP::Receive) {
        // Both tabs are driven by the address list + balances.
        addresses_dirty_ = true;
        refreshAddressData();
        address_timer_ = 0.0f;
    } else if (page == NP::Peers) {
        refreshPeerInfo();
        peer_timer_ = 0.0f;
    } else if (page == NP::Mining) {
        refreshMiningInfo();
    }
}
|
|
|
|
bool App::shouldRefreshTransactions() const
|
|
{
|
|
const int currentBlocks = state_.sync.blocks;
|
|
return last_tx_block_height_ < 0
|
|
|| currentBlocks != last_tx_block_height_
|
|
|| state_.transactions.empty()
|
|
|| transactions_dirty_
|
|
|| tx_age_timer_ >= TX_MAX_AGE;
|
|
}
|
|
|
|
void App::refreshData()
|
|
{
|
|
if (!state_.connected || !rpc_ || !worker_) return;
|
|
|
|
// Dispatch each category independently — results trickle into the UI
|
|
// as each completes, rather than waiting for the slowest phase.
|
|
refreshCoreData();
|
|
|
|
if (addresses_dirty_)
|
|
refreshAddressData();
|
|
|
|
if (shouldRefreshTransactions())
|
|
refreshTransactionData();
|
|
|
|
if (current_page_ == ui::NavPage::Peers)
|
|
refreshPeerInfo();
|
|
|
|
if (!encryption_state_prefetched_) {
|
|
encryption_state_prefetched_ = false;
|
|
refreshEncryptionState();
|
|
}
|
|
}
|
|
|
|
// ============================================================================
|
|
// Core Data: balance + blockchain info (~50-100ms, 2 RPC calls)
|
|
// Uses fast_worker_ when on Overview tab for lower latency.
|
|
// ============================================================================
|
|
|
|
// Refresh the "core" category: total balance (z_gettotalbalance) and chain
// sync state (getblockchaininfo). On the Overview tab the calls go through
// the dedicated fast-lane client/worker for lower latency; otherwise the
// shared worker/client pair is used. Re-entry is prevented by the atomic
// core_refresh_in_progress_ guard, cleared at the end of the main-thread
// callback. May also kick off an auto-shield operation when enabled.
void App::refreshCoreData()
{
    if (!state_.connected) return;

    // Use fast-lane on Overview for snappier balance updates
    bool useFast = (current_page_ == ui::NavPage::Overview);
    auto* w = useFast && fast_worker_ && fast_worker_->isRunning()
                  ? fast_worker_.get() : worker_.get();
    auto* rpc = useFast && fast_rpc_ && fast_rpc_->isConnected()
                  ? fast_rpc_.get() : rpc_.get();
    if (!w || !rpc) return;

    // Atomic test-and-set guard: if a refresh is already in flight, bail.
    if (core_refresh_in_progress_.exchange(true)) return;

    // NOTE(review): `rpc` is captured as a raw pointer into the worker
    // lambda — assumes rpc_/fast_rpc_ outlive the posted job (they appear
    // to be owned by App for its lifetime); confirm on teardown paths.
    w->post([this, rpc]() -> rpc::RPCWorker::MainCb {
        json totalBal, blockInfo;
        bool balOk = false, blockOk = false;

        // Each call is independent; a failure in one doesn't block the other.
        try {
            totalBal = rpc->call("z_gettotalbalance");
            balOk = true;
        } catch (const std::exception& e) {
            DEBUG_LOGF("Balance error: %s\n", e.what());
        }
        try {
            blockInfo = rpc->call("getblockchaininfo");
            blockOk = true;
        } catch (const std::exception& e) {
            DEBUG_LOGF("BlockchainInfo error: %s\n", e.what());
        }

        // Main-thread callback: apply results to state_ and maybe auto-shield.
        return [this, totalBal, blockInfo, balOk, blockOk]() {
            try {
                if (balOk) {
                    // z_gettotalbalance returns amounts as strings.
                    if (totalBal.contains("private"))
                        state_.shielded_balance = std::stod(totalBal["private"].get<std::string>());
                    if (totalBal.contains("transparent"))
                        state_.transparent_balance = std::stod(totalBal["transparent"].get<std::string>());
                    if (totalBal.contains("total"))
                        state_.total_balance = std::stod(totalBal["total"].get<std::string>());
                    state_.last_balance_update = std::time(nullptr);
                }
                if (blockOk) {
                    if (blockInfo.contains("blocks"))
                        state_.sync.blocks = blockInfo["blocks"].get<int>();
                    if (blockInfo.contains("headers"))
                        state_.sync.headers = blockInfo["headers"].get<int>();
                    if (blockInfo.contains("verificationprogress"))
                        state_.sync.verification_progress = blockInfo["verificationprogress"].get<double>();
                    // Ignore longestchain == 0 (daemon reports 0 before peers
                    // have been polled) so we don't regress the stored value.
                    if (blockInfo.contains("longestchain")) {
                        int lc = blockInfo["longestchain"].get<int>();
                        if (lc > 0) state_.longestchain = lc;
                    }
                    // Clamp: longestchain can lag behind our own tip when
                    // peer data is stale.
                    if (state_.longestchain > 0 && state_.sync.blocks > state_.longestchain)
                        state_.longestchain = state_.sync.blocks;
                    // "Syncing" = more than 2 blocks behind the best-known tip
                    // (longestchain when we have it, headers otherwise).
                    if (state_.longestchain > 0)
                        state_.sync.syncing = (state_.sync.blocks < state_.longestchain - 2);
                    else
                        state_.sync.syncing = (state_.sync.blocks < state_.sync.headers - 2);
                    if (blockInfo.contains("notarized"))
                        state_.notarized = blockInfo["notarized"].get<int>();
                }
                // Auto-shield transparent funds if enabled. Only when fully
                // synced, above dust (0.0001), and no shield op already
                // pending (atomic test-and-set on auto_shield_pending_).
                if (balOk && settings_ && settings_->getAutoShield() &&
                    state_.transparent_balance > 0.0001 && !state_.sync.syncing &&
                    !auto_shield_pending_.exchange(true)) {
                    // Shield to the wallet's first shielded address.
                    // NOTE(review): iterates state_.addresses (combined list?)
                    // while refreshAddressData fills z_addresses/t_addresses —
                    // confirm state_.addresses is populated elsewhere.
                    std::string targetZAddr;
                    for (const auto& addr : state_.addresses) {
                        if (addr.isShielded()) {
                            targetZAddr = addr.address;
                            break;
                        }
                    }
                    if (!targetZAddr.empty() && rpc_) {
                        DEBUG_LOGF("[AutoShield] Shielding %.8f DRGX to %s\n",
                                   state_.transparent_balance, targetZAddr.c_str());
                        // "*" = shield coinbase from all t-addresses; fee
                        // 0.0001, at most 50 UTXOs per operation.
                        rpc_->z_shieldCoinbase("*", targetZAddr, 0.0001, 50,
                            [this](const json& result) {
                                if (result.contains("opid")) {
                                    DEBUG_LOGF("[AutoShield] Started: %s\n",
                                               result["opid"].get<std::string>().c_str());
                                }
                                auto_shield_pending_ = false;
                            },
                            [this](const std::string& err) {
                                DEBUG_LOGF("[AutoShield] Error: %s\n", err.c_str());
                                auto_shield_pending_ = false;
                            });
                    } else {
                        // No target address — release the pending flag so a
                        // later cycle can retry.
                        auto_shield_pending_ = false;
                    }
                }
            } catch (const std::exception& e) {
                DEBUG_LOGF("[refreshCoreData] callback error: %s\n", e.what());
            }
            core_refresh_in_progress_.store(false, std::memory_order_release);
        };
    });
}
|
|
|
|
// ============================================================================
|
|
// Address Data: z/t address lists + per-address balances
|
|
// ============================================================================
|
|
|
|
void App::refreshAddressData()
|
|
{
|
|
if (!worker_ || !rpc_ || !state_.connected) return;
|
|
if (address_refresh_in_progress_.exchange(true)) return;
|
|
|
|
worker_->post([this]() -> rpc::RPCWorker::MainCb {
|
|
std::vector<AddressInfo> zAddrs, tAddrs;
|
|
|
|
// z-addresses
|
|
try {
|
|
json zList = rpc_->call("z_listaddresses");
|
|
for (const auto& addr : zList) {
|
|
AddressInfo info;
|
|
info.address = addr.get<std::string>();
|
|
info.type = "shielded";
|
|
zAddrs.push_back(info);
|
|
}
|
|
} catch (const std::exception& e) {
|
|
DEBUG_LOGF("z_listaddresses error: %s\n", e.what());
|
|
}
|
|
// z-balances via z_listunspent (single call)
|
|
try {
|
|
json unspent = rpc_->call("z_listunspent");
|
|
std::map<std::string, double> zBalances;
|
|
for (const auto& utxo : unspent) {
|
|
if (utxo.contains("address") && utxo.contains("amount")) {
|
|
zBalances[utxo["address"].get<std::string>()] += utxo["amount"].get<double>();
|
|
}
|
|
}
|
|
for (auto& info : zAddrs) {
|
|
auto it = zBalances.find(info.address);
|
|
if (it != zBalances.end()) info.balance = it->second;
|
|
}
|
|
} catch (const std::exception& e) {
|
|
DEBUG_LOGF("z_listunspent unavailable (%s), falling back to z_getbalance\n", e.what());
|
|
for (auto& info : zAddrs) {
|
|
try {
|
|
json bal = rpc_->call("z_getbalance", json::array({info.address}));
|
|
if (!bal.is_null()) info.balance = bal.get<double>();
|
|
} catch (...) {}
|
|
}
|
|
}
|
|
// t-addresses
|
|
try {
|
|
json tList = rpc_->call("getaddressesbyaccount", json::array({""}));
|
|
for (const auto& addr : tList) {
|
|
AddressInfo info;
|
|
info.address = addr.get<std::string>();
|
|
info.type = "transparent";
|
|
tAddrs.push_back(info);
|
|
}
|
|
} catch (const std::exception& e) {
|
|
DEBUG_LOGF("getaddressesbyaccount error: %s\n", e.what());
|
|
}
|
|
// t-balances via listunspent
|
|
try {
|
|
json utxos = rpc_->call("listunspent");
|
|
std::map<std::string, double> tBalances;
|
|
for (const auto& utxo : utxos) {
|
|
tBalances[utxo["address"].get<std::string>()] += utxo["amount"].get<double>();
|
|
}
|
|
for (auto& info : tAddrs) {
|
|
auto it = tBalances.find(info.address);
|
|
if (it != tBalances.end()) info.balance = it->second;
|
|
}
|
|
} catch (const std::exception& e) {
|
|
DEBUG_LOGF("listunspent error: %s\n", e.what());
|
|
}
|
|
|
|
return [this, zAddrs = std::move(zAddrs), tAddrs = std::move(tAddrs)]() {
|
|
state_.z_addresses = std::move(zAddrs);
|
|
state_.t_addresses = std::move(tAddrs);
|
|
address_list_dirty_ = true;
|
|
addresses_dirty_ = false;
|
|
address_refresh_in_progress_.store(false, std::memory_order_release);
|
|
};
|
|
});
|
|
}
|
|
|
|
// ============================================================================
|
|
// Transaction Data: transparent + shielded receives + z_viewtransaction enrichment
|
|
// ============================================================================
|
|
|
|
void App::refreshTransactionData()
|
|
{
|
|
if (!worker_ || !rpc_ || !state_.connected) return;
|
|
if (tx_refresh_in_progress_.exchange(true)) return;
|
|
|
|
// Capture decision state on main thread
|
|
const int currentBlocks = state_.sync.blocks;
|
|
transactions_dirty_ = false;
|
|
tx_age_timer_ = 0.0f;
|
|
|
|
// Snapshot z-addresses for shielded receive lookups
|
|
std::vector<std::string> txZAddrs;
|
|
for (const auto& za : state_.z_addresses) {
|
|
if (!za.address.empty()) txZAddrs.push_back(za.address);
|
|
}
|
|
|
|
// Collect txids that are fully enriched (skip re-enrichment)
|
|
std::unordered_set<std::string> fullyEnriched;
|
|
for (const auto& [txid, _] : viewtx_cache_) {
|
|
fullyEnriched.insert(txid);
|
|
}
|
|
for (const auto& tx : state_.transactions) {
|
|
if (tx.confirmations > 6 && tx.timestamp != 0) {
|
|
fullyEnriched.insert(tx.txid);
|
|
}
|
|
}
|
|
|
|
// Snapshot caches for the worker thread
|
|
auto viewtxCacheSnap = viewtx_cache_;
|
|
auto sendTxidsSnap = send_txids_;
|
|
|
|
worker_->post([this, currentBlocks,
|
|
txZAddrs = std::move(txZAddrs),
|
|
fullyEnriched = std::move(fullyEnriched),
|
|
viewtxCacheSnap = std::move(viewtxCacheSnap),
|
|
sendTxidsSnap = std::move(sendTxidsSnap)]() -> rpc::RPCWorker::MainCb {
|
|
std::vector<TransactionInfo> txns;
|
|
std::unordered_map<std::string, ViewTxCacheEntry> newViewTxEntries;
|
|
std::set<std::string> knownTxids;
|
|
|
|
// Phase 3a: transparent transactions
|
|
try {
|
|
json result = rpc_->call("listtransactions", json::array({"", 9999}));
|
|
for (const auto& tx : result) {
|
|
TransactionInfo info;
|
|
if (tx.contains("txid")) info.txid = tx["txid"].get<std::string>();
|
|
if (tx.contains("category")) info.type = tx["category"].get<std::string>();
|
|
if (tx.contains("amount")) info.amount = tx["amount"].get<double>();
|
|
if (tx.contains("time")) info.timestamp = tx["time"].get<int64_t>();
|
|
else if (tx.contains("timereceived")) info.timestamp = tx["timereceived"].get<int64_t>();
|
|
if (tx.contains("confirmations")) info.confirmations = tx["confirmations"].get<int>();
|
|
if (tx.contains("address")) info.address = tx["address"].get<std::string>();
|
|
knownTxids.insert(info.txid);
|
|
txns.push_back(info);
|
|
}
|
|
} catch (const std::exception& e) {
|
|
DEBUG_LOGF("listtransactions error: %s\n", e.what());
|
|
}
|
|
|
|
// Phase 3b: shielded receives
|
|
for (const auto& addr : txZAddrs) {
|
|
try {
|
|
json zresult = rpc_->call("z_listreceivedbyaddress", json::array({addr, 0}));
|
|
if (zresult.is_null() || !zresult.is_array()) continue;
|
|
for (const auto& note : zresult) {
|
|
std::string txid;
|
|
if (note.contains("txid")) txid = note["txid"].get<std::string>();
|
|
if (txid.empty()) continue;
|
|
if (note.contains("change") && note["change"].get<bool>()) continue;
|
|
bool dominated = false;
|
|
for (const auto& existing : txns) {
|
|
if (existing.txid == txid && existing.type == "receive") {
|
|
dominated = true; break;
|
|
}
|
|
}
|
|
if (dominated) continue;
|
|
TransactionInfo info;
|
|
info.txid = txid;
|
|
info.type = "receive";
|
|
info.address = addr;
|
|
if (note.contains("amount")) info.amount = note["amount"].get<double>();
|
|
if (note.contains("confirmations")) info.confirmations = note["confirmations"].get<int>();
|
|
if (note.contains("time")) info.timestamp = note["time"].get<int64_t>();
|
|
if (note.contains("memoStr")) info.memo = note["memoStr"].get<std::string>();
|
|
knownTxids.insert(txid);
|
|
txns.push_back(info);
|
|
}
|
|
} catch (const std::exception& e) {
|
|
DEBUG_LOGF("z_listreceivedbyaddress error for %s: %s\n",
|
|
addr.substr(0, 12).c_str(), e.what());
|
|
}
|
|
}
|
|
|
|
// Include txids from completed z_sendmany operations
|
|
for (const auto& txid : sendTxidsSnap) {
|
|
knownTxids.insert(txid);
|
|
}
|
|
|
|
// Phase 3c: detect shielded sends via z_viewtransaction
|
|
int viewTxCount = 0;
|
|
|
|
auto applyViewTxEntry = [&](const std::string& txid,
|
|
const ViewTxCacheEntry& entry) {
|
|
for (const auto& out : entry.outgoing_outputs) {
|
|
bool alreadyTracked = false;
|
|
for (const auto& existing : txns) {
|
|
if (existing.txid == txid && existing.type == "send"
|
|
&& std::abs(existing.amount + out.value) < 0.00000001) {
|
|
alreadyTracked = true; break;
|
|
}
|
|
}
|
|
if (alreadyTracked) continue;
|
|
TransactionInfo info;
|
|
info.txid = txid;
|
|
info.type = "send";
|
|
info.address = out.address;
|
|
info.amount = -out.value;
|
|
info.memo = out.memo;
|
|
info.from_address = entry.from_address;
|
|
for (const auto& existing : txns) {
|
|
if (existing.txid == txid) {
|
|
info.confirmations = existing.confirmations;
|
|
info.timestamp = existing.timestamp;
|
|
break;
|
|
}
|
|
}
|
|
txns.push_back(info);
|
|
}
|
|
};
|
|
|
|
for (const std::string& txid : knownTxids) {
|
|
if (fullyEnriched.count(txid)) continue;
|
|
|
|
auto cit = viewtxCacheSnap.find(txid);
|
|
if (cit != viewtxCacheSnap.end()) {
|
|
applyViewTxEntry(txid, cit->second);
|
|
continue;
|
|
}
|
|
|
|
if (viewTxCount >= MAX_VIEWTX_PER_CYCLE) break;
|
|
++viewTxCount;
|
|
|
|
try {
|
|
json vtx = rpc_->call("z_viewtransaction", json::array({txid}));
|
|
if (vtx.is_null() || !vtx.is_object()) continue;
|
|
|
|
ViewTxCacheEntry entry;
|
|
if (vtx.contains("spends") && vtx["spends"].is_array()) {
|
|
for (const auto& spend : vtx["spends"]) {
|
|
if (spend.contains("address")) {
|
|
entry.from_address = spend["address"].get<std::string>();
|
|
break;
|
|
}
|
|
}
|
|
}
|
|
if (vtx.contains("outputs") && vtx["outputs"].is_array()) {
|
|
for (const auto& output : vtx["outputs"]) {
|
|
bool outgoing = false;
|
|
if (output.contains("outgoing"))
|
|
outgoing = output["outgoing"].get<bool>();
|
|
if (!outgoing) continue;
|
|
ViewTxCacheEntry::Output out;
|
|
if (output.contains("address"))
|
|
out.address = output["address"].get<std::string>();
|
|
if (output.contains("value"))
|
|
out.value = output["value"].get<double>();
|
|
if (output.contains("memoStr"))
|
|
out.memo = output["memoStr"].get<std::string>();
|
|
entry.outgoing_outputs.push_back(std::move(out));
|
|
}
|
|
}
|
|
|
|
applyViewTxEntry(txid, entry);
|
|
|
|
for (auto& info : txns) {
|
|
if (info.txid == txid && info.timestamp == 0) {
|
|
try {
|
|
json rawtx = rpc_->call("gettransaction", json::array({txid}));
|
|
if (!rawtx.is_null() && rawtx.contains("time"))
|
|
info.timestamp = rawtx["time"].get<int64_t>();
|
|
if (!rawtx.is_null() && rawtx.contains("confirmations"))
|
|
info.confirmations = rawtx["confirmations"].get<int>();
|
|
} catch (...) {}
|
|
break;
|
|
}
|
|
}
|
|
|
|
newViewTxEntries[txid] = std::move(entry);
|
|
} catch (const std::exception& e) {
|
|
(void)e;
|
|
}
|
|
}
|
|
|
|
std::sort(txns.begin(), txns.end(),
|
|
[](const TransactionInfo& a, const TransactionInfo& b) {
|
|
return a.timestamp > b.timestamp;
|
|
});
|
|
|
|
return [this, txns = std::move(txns), currentBlocks,
|
|
newViewTxEntries = std::move(newViewTxEntries)]() {
|
|
state_.transactions = std::move(txns);
|
|
state_.last_tx_update = std::time(nullptr);
|
|
last_tx_block_height_ = currentBlocks;
|
|
|
|
for (auto& [txid, entry] : newViewTxEntries) {
|
|
viewtx_cache_[txid] = std::move(entry);
|
|
send_txids_.erase(txid);
|
|
}
|
|
|
|
confirmed_tx_cache_.clear();
|
|
confirmed_tx_ids_.clear();
|
|
for (const auto& tx : state_.transactions) {
|
|
if (tx.confirmations >= 10 && tx.timestamp != 0) {
|
|
confirmed_tx_ids_.insert(tx.txid);
|
|
confirmed_tx_cache_.push_back(tx);
|
|
}
|
|
}
|
|
confirmed_cache_block_ = currentBlocks;
|
|
tx_refresh_in_progress_.store(false, std::memory_order_release);
|
|
};
|
|
});
|
|
}
|
|
|
|
// ============================================================================
|
|
// Encryption State: wallet info (one-shot on connect, lightweight)
|
|
// ============================================================================
|
|
|
|
/// Queries `getwalletinfo` once and publishes the wallet-encryption flags
/// (encrypted / locked / unlocked_until) into state_. The RPC runs on the
/// worker thread; the returned MainCb mutates state_ on the main thread.
void App::refreshEncryptionState()
{
    // Requires a worker to run on and a live RPC connection.
    if (!worker_ || !rpc_ || !state_.connected) return;

    worker_->post([this]() -> rpc::RPCWorker::MainCb {
        json walletInfo;
        bool ok = false;
        try {
            walletInfo = rpc_->call("getwalletinfo");
            ok = true;
        } catch (...) {}

        // RPC failed — nothing to apply on the main thread.
        if (!ok) return nullptr;

        return [this, walletInfo]() {
            try {
                // "unlocked_until" is only present for encrypted wallets:
                // 0 means currently locked, otherwise it is the time the
                // wallet will re-lock.
                if (walletInfo.contains("unlocked_until")) {
                    state_.encrypted = true;
                    int64_t until = walletInfo["unlocked_until"].get<int64_t>();
                    state_.unlocked_until = until;
                    state_.locked = (until == 0);
                } else {
                    // Field absent — wallet is not encrypted.
                    state_.encrypted = false;
                    state_.locked = false;
                    state_.unlocked_until = 0;
                }
                state_.encryption_state_known = true;
            } catch (...) {}
        };
    });
}
|
|
|
|
/// Balance data is produced by the consolidated core-data refresh; this
/// wrapper keeps a stable, intention-revealing name for callers.
void App::refreshBalance()
{
    refreshCoreData();
}
|
|
|
|
/// Marks the address list stale and kicks off an async address refresh.
void App::refreshAddresses()
{
    addresses_dirty_ = true;
    refreshAddressData();
}
|
|
|
|
/// Poll mining status: getlocalsolps every tick, getmininginfo every ~5
/// ticks. Results are published to state_.mining on the main thread.
void App::refreshMiningInfo()
{
    // Use the dedicated fast-lane worker + connection so mining polls
    // never block behind the main refresh batch. Falls back to the main
    // worker if the fast lane isn't ready yet (e.g. during initial connect).
    auto* w = (fast_worker_ && fast_worker_->isRunning()) ? fast_worker_.get() : worker_.get();
    auto* rpc = (fast_rpc_ && fast_rpc_->isConnected()) ? fast_rpc_.get() : rpc_.get();
    if (!w || !rpc) return;

    // Prevent worker queue pileup — skip if previous refresh hasn't finished
    if (mining_refresh_in_progress_.exchange(true)) return;

    // Capture daemon memory outside (may be accessed on main thread)
    double daemonMemMb = 0.0;
    if (embedded_daemon_) {
        daemonMemMb = embedded_daemon_->getMemoryUsageMB();
    }

    // Slow-tick counter: run full getmininginfo every ~5 seconds
    // to reduce RPC overhead. getlocalsolps (returns H/s for RandomX) runs every tick (1s).
    // NOTE: getinfo is NOT called here — longestchain/notarized are updated by
    // refreshBalance (via getblockchaininfo), and daemon_version/protocol_version/
    // p2p_port are static for the lifetime of a connection (set in onConnected).
    bool doSlowRefresh = (mining_slow_counter_++ % 5 == 0);

    // `rpc` is captured raw; assumes the connection outlives the posted
    // task — TODO(review) confirm worker drains before rpc teardown.
    w->post([this, rpc, daemonMemMb, doSlowRefresh]() -> rpc::RPCWorker::MainCb {
        json miningInfo, localHashrateJson;
        bool miningOk = false, hashrateOk = false;

        // Fast path: only getlocalsolps (single RPC call, ~1ms) — returns H/s (RandomX)
        try {
            localHashrateJson = rpc->call("getlocalsolps");
            hashrateOk = true;
        } catch (const std::exception& e) {
            DEBUG_LOGF("getLocalHashrate error: %s\n", e.what());
        }

        // Slow path: getmininginfo every ~5s
        if (doSlowRefresh) {
            try {
                miningInfo = rpc->call("getmininginfo");
                miningOk = true;
            } catch (const std::exception& e) {
                DEBUG_LOGF("getMiningInfo error: %s\n", e.what());
            }
        }

        // Main-thread callback: publish results and append to the
        // bounded hashrate history used by the chart.
        return [this, miningInfo, localHashrateJson, miningOk, hashrateOk, daemonMemMb]() {
            try {
                if (hashrateOk) {
                    state_.mining.localHashrate = localHashrateJson.get<double>();
                    state_.mining.hashrate_history.push_back(state_.mining.localHashrate);
                    if (state_.mining.hashrate_history.size() > MiningInfo::MAX_HISTORY) {
                        state_.mining.hashrate_history.erase(state_.mining.hashrate_history.begin());
                    }
                }
                if (miningOk) {
                    if (miningInfo.contains("generate"))
                        state_.mining.generate = miningInfo["generate"].get<bool>();
                    if (miningInfo.contains("genproclimit"))
                        state_.mining.genproclimit = miningInfo["genproclimit"].get<int>();
                    if (miningInfo.contains("blocks"))
                        state_.mining.blocks = miningInfo["blocks"].get<int>();
                    if (miningInfo.contains("difficulty"))
                        state_.mining.difficulty = miningInfo["difficulty"].get<double>();
                    if (miningInfo.contains("networkhashps"))
                        state_.mining.networkHashrate = miningInfo["networkhashps"].get<double>();
                    if (miningInfo.contains("chain"))
                        state_.mining.chain = miningInfo["chain"].get<std::string>();
                    state_.last_mining_update = std::time(nullptr);
                }
            } catch (const std::exception& e) {
                DEBUG_LOGF("[refreshMiningInfo] callback error: %s\n", e.what());
            }
            state_.mining.daemon_memory_mb = daemonMemMb;
            // Release the in-progress guard so the next tick may post again.
            mining_refresh_in_progress_.store(false);
        };
    });
}
|
|
|
|
/// Poll getpeerinfo + listbanned and publish results to state_.peers /
/// state_.bannedPeers. Runs on the fast-lane worker when available so the
/// Network tab keeps updating while the main worker is busy.
void App::refreshPeerInfo()
{
    if (!rpc_) return;

    // Use fast-lane worker to bypass head-of-line blocking behind refreshData.
    auto* w = (fast_worker_ && fast_worker_->isRunning()) ? fast_worker_.get() : worker_.get();
    auto* r = (fast_rpc_ && fast_rpc_->isConnected()) ? fast_rpc_.get() : rpc_.get();
    if (!w) return;

    peer_refresh_in_progress_.store(true, std::memory_order_relaxed);

    w->post([this, r]() -> rpc::RPCWorker::MainCb {
        std::vector<PeerInfo> peers;
        std::vector<BannedPeer> bannedPeers;

        // Every field in the daemon reply is optional — copy only what exists.
        try {
            json result = r->call("getpeerinfo");
            for (const auto& peer : result) {
                PeerInfo info;
                if (peer.contains("id")) info.id = peer["id"].get<int>();
                if (peer.contains("addr")) info.addr = peer["addr"].get<std::string>();
                if (peer.contains("subver")) info.subver = peer["subver"].get<std::string>();
                if (peer.contains("services")) info.services = peer["services"].get<std::string>();
                if (peer.contains("version")) info.version = peer["version"].get<int>();
                if (peer.contains("conntime")) info.conntime = peer["conntime"].get<int64_t>();
                if (peer.contains("banscore")) info.banscore = peer["banscore"].get<int>();
                if (peer.contains("pingtime")) info.pingtime = peer["pingtime"].get<double>();
                if (peer.contains("bytessent")) info.bytessent = peer["bytessent"].get<int64_t>();
                if (peer.contains("bytesrecv")) info.bytesrecv = peer["bytesrecv"].get<int64_t>();
                if (peer.contains("startingheight")) info.startingheight = peer["startingheight"].get<int>();
                if (peer.contains("synced_headers")) info.synced_headers = peer["synced_headers"].get<int>();
                if (peer.contains("synced_blocks")) info.synced_blocks = peer["synced_blocks"].get<int>();
                if (peer.contains("inbound")) info.inbound = peer["inbound"].get<bool>();
                if (peer.contains("tls_cipher")) info.tls_cipher = peer["tls_cipher"].get<std::string>();
                if (peer.contains("tls_verified")) info.tls_verified = peer["tls_verified"].get<bool>();
                peers.push_back(info);
            }
        } catch (const std::exception& e) {
            DEBUG_LOGF("getPeerInfo error: %s\n", e.what());
        }

        // Banned list is fetched independently so a failure in one call
        // does not discard the other's results.
        try {
            json result = r->call("listbanned");
            for (const auto& ban : result) {
                BannedPeer info;
                if (ban.contains("address")) info.address = ban["address"].get<std::string>();
                if (ban.contains("banned_until")) info.banned_until = ban["banned_until"].get<int64_t>();
                bannedPeers.push_back(info);
            }
        } catch (const std::exception& e) {
            DEBUG_LOGF("listBanned error: %s\n", e.what());
        }

        // Publish on the main thread and release the in-progress guard.
        return [this, peers = std::move(peers), bannedPeers = std::move(bannedPeers)]() {
            state_.peers = std::move(peers);
            state_.bannedPeers = std::move(bannedPeers);
            state_.last_peer_update = std::time(nullptr);
            peer_refresh_in_progress_.store(false, std::memory_order_relaxed);
        };
    });
}
|
|
|
|
/// Fetch DRGX market data from CoinGecko (blocking HTTP on the worker
/// thread) and publish it to state_.market on the main thread.
/// No-op when price fetching is disabled in settings.
void App::refreshPrice()
{
    // Skip if price fetching is disabled
    if (!settings_->getFetchPrices()) return;
    if (!worker_) return;

    worker_->post([this]() -> rpc::RPCWorker::MainCb {
        // --- Worker thread: blocking HTTP GET to CoinGecko ---
        MarketInfo market;
        bool ok = false;

        try {
            CURL* curl = curl_easy_init();
            if (!curl) {
                DEBUG_LOGF("Failed to initialize curl for price fetch\n");
                return nullptr;
            }

            std::string response_data;
            // "dragonx-2" is the CoinGecko listing id for this coin.
            const char* url = "https://api.coingecko.com/api/v3/simple/price?ids=dragonx-2&vs_currencies=usd,btc&include_24hr_change=true&include_24hr_vol=true&include_market_cap=true";

            // Captureless lambda; unary + below converts it to the plain
            // function pointer libcurl expects.
            auto write_callback = [](void* contents, size_t size, size_t nmemb, std::string* userp) -> size_t {
                size_t totalSize = size * nmemb;
                userp->append((char*)contents, totalSize);
                return totalSize;
            };

            curl_easy_setopt(curl, CURLOPT_URL, url);
            curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, +write_callback);
            curl_easy_setopt(curl, CURLOPT_WRITEDATA, &response_data);
            curl_easy_setopt(curl, CURLOPT_TIMEOUT, 10L);
            curl_easy_setopt(curl, CURLOPT_CONNECTTIMEOUT, 5L);
            curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
            curl_easy_setopt(curl, CURLOPT_USERAGENT, "DragonX-Wallet/1.0");

            CURLcode res = curl_easy_perform(curl);
            long http_code = 0;
            curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &http_code);
            curl_easy_cleanup(curl);

            if (res == CURLE_OK && http_code == 200) {
                auto j = json::parse(response_data);
                if (j.contains("dragonx-2")) {
                    const auto& data = j["dragonx-2"];
                    market.price_usd = data.value("usd", 0.0);
                    market.price_btc = data.value("btc", 0.0);
                    market.change_24h = data.value("usd_24h_change", 0.0);
                    market.volume_24h = data.value("usd_24h_vol", 0.0);
                    market.market_cap = data.value("usd_market_cap", 0.0);

                    // NOTE(review): localtime() is not thread-safe and this
                    // runs on a worker thread — confirm no other thread calls
                    // localtime concurrently, or switch to localtime_r/_s.
                    auto now = std::chrono::system_clock::now();
                    auto time_t = std::chrono::system_clock::to_time_t(now);
                    char buf[64];
                    strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S", localtime(&time_t));
                    market.last_updated = buf;
                    ok = true;
                    DEBUG_LOGF("Price updated: $%.6f USD\n", market.price_usd);
                }
            } else {
                DEBUG_LOGF("Price fetch failed: %s (HTTP %ld)\n",
                           res != CURLE_OK ? curl_easy_strerror(res) : "OK", http_code);
            }
        } catch (const std::exception& e) {
            DEBUG_LOGF("Price fetch error: %s\n", e.what());
        }

        if (!ok) return nullptr;

        // Main-thread callback: publish fields and append to the bounded
        // price history used by the sparkline chart.
        return [this, market]() {
            state_.market.price_usd = market.price_usd;
            state_.market.price_btc = market.price_btc;
            state_.market.change_24h = market.change_24h;
            state_.market.volume_24h = market.volume_24h;
            state_.market.market_cap = market.market_cap;
            state_.market.last_updated = market.last_updated;
            state_.market.last_fetch_time = std::chrono::steady_clock::now();

            state_.market.price_history.push_back(market.price_usd);
            if (state_.market.price_history.size() > MarketInfo::MAX_HISTORY) {
                state_.market.price_history.erase(state_.market.price_history.begin());
            }
        };
    });
}
|
|
|
|
/// Market data currently consists only of the CoinGecko price feed.
void App::refreshMarketData()
{
    refreshPrice();
}
|
|
|
|
// ============================================================================
|
|
// Mining Operations
|
|
// ============================================================================
|
|
|
|
void App::startMining(int threads)
|
|
{
|
|
if (!state_.connected || !rpc_ || !worker_) return;
|
|
if (mining_toggle_in_progress_.exchange(true)) return; // already in progress
|
|
|
|
worker_->post([this, threads]() -> rpc::RPCWorker::MainCb {
|
|
bool ok = false;
|
|
std::string errMsg;
|
|
try {
|
|
rpc_->call("setgenerate", {true, threads});
|
|
ok = true;
|
|
} catch (const std::exception& e) {
|
|
errMsg = e.what();
|
|
}
|
|
return [this, threads, ok, errMsg]() {
|
|
mining_toggle_in_progress_.store(false);
|
|
if (ok) {
|
|
state_.mining.generate = true;
|
|
state_.mining.genproclimit = threads;
|
|
DEBUG_LOGF("Mining started with %d threads\n", threads);
|
|
} else {
|
|
DEBUG_LOGF("Failed to start mining: %s\n", errMsg.c_str());
|
|
ui::Notifications::instance().error("Mining failed: " + errMsg);
|
|
}
|
|
};
|
|
});
|
|
}
|
|
|
|
void App::stopMining()
|
|
{
|
|
if (!state_.connected || !rpc_ || !worker_) return;
|
|
if (mining_toggle_in_progress_.exchange(true)) return; // already in progress
|
|
|
|
worker_->post([this]() -> rpc::RPCWorker::MainCb {
|
|
bool ok = false;
|
|
try {
|
|
rpc_->call("setgenerate", {false, 0});
|
|
ok = true;
|
|
} catch (const std::exception& e) {
|
|
DEBUG_LOGF("Failed to stop mining: %s\n", e.what());
|
|
}
|
|
return [this, ok]() {
|
|
mining_toggle_in_progress_.store(false);
|
|
if (ok) {
|
|
state_.mining.generate = false;
|
|
state_.mining.localHashrate = 0.0;
|
|
DEBUG_LOGF("Mining stopped\n");
|
|
}
|
|
};
|
|
});
|
|
}
|
|
|
|
/// Start pool mining via the bundled xmrig miner.
/// Builds the xmrig config from settings, picks a payout address
/// (first z-address, then transparent, then the configured pool worker),
/// stops any running solo/pool miner first, and reports failures —
/// including Windows Defender quarantine — to the user.
/// @param threads  number of mining threads (same selection as solo mining)
void App::startPoolMining(int threads)
{
    if (!xmrig_manager_)
        xmrig_manager_ = std::make_unique<daemon::XmrigManager>();

    // If already running, stop first (e.g. thread count change)
    if (xmrig_manager_->isRunning()) {
        xmrig_manager_->stop();
    }

    // Stop solo mining first if active
    if (state_.mining.generate) stopMining();

    daemon::XmrigManager::Config cfg;
    cfg.pool_url = settings_->getPoolUrl();
    cfg.worker_name = settings_->getPoolWorker();
    cfg.algo = settings_->getPoolAlgo();
    cfg.threads = threads; // Use the same thread selection as solo mining
    cfg.tls = settings_->getPoolTls();
    cfg.hugepages = settings_->getPoolHugepages();

    // Use first shielded address as the mining wallet address, fall back to transparent
    for (const auto& addr : state_.z_addresses) {
        if (!addr.address.empty()) {
            cfg.wallet_address = addr.address;
            break;
        }
    }
    if (cfg.wallet_address.empty()) {
        for (const auto& addr : state_.addresses) {
            if (addr.type == "transparent" && !addr.address.empty()) {
                cfg.wallet_address = addr.address;
                break;
            }
        }
    }

    // Fallback: use pool worker address from settings (available even before
    // the daemon is connected or the blockchain is synced).
    if (cfg.wallet_address.empty() && !cfg.worker_name.empty()) {
        cfg.wallet_address = cfg.worker_name;
    }

    // No usable address at all — tell the user how to create one.
    if (cfg.wallet_address.empty()) {
        DEBUG_LOGF("[ERROR] Pool mining: No wallet address available\n");
        ui::Notifications::instance().error("No wallet address available — generate a Z address in the Receive tab");
        return;
    }

    if (!xmrig_manager_->start(cfg)) {
        std::string err = xmrig_manager_->getLastError();
        DEBUG_LOGF("[ERROR] Pool mining: %s\n", err.c_str());

        // Check for Windows Defender blocking (error 225 = ERROR_VIRUS_INFECTED)
        if (err.find("error 225") != std::string::npos ||
            err.find("virus") != std::string::npos) {
            ui::Notifications::instance().error(
                "Windows Defender blocked xmrig. Add exclusion for %APPDATA%\\ObsidianDragon");
#ifdef _WIN32
            // Offer to open Windows Security settings
            pending_antivirus_dialog_ = true;
#endif
        } else {
            ui::Notifications::instance().error("Failed to start pool miner: " + err);
        }
    }
}
|
|
|
|
void App::stopPoolMining()
|
|
{
|
|
if (xmrig_manager_ && xmrig_manager_->isRunning()) {
|
|
xmrig_manager_->stop(3000);
|
|
}
|
|
}
|
|
|
|
// ============================================================================
|
|
// Peer Operations
|
|
// ============================================================================
|
|
|
|
void App::banPeer(const std::string& ip, int duration_seconds)
|
|
{
|
|
if (!state_.connected || !rpc_) return;
|
|
|
|
rpc_->setBan(ip, "add", [this](const json&) {
|
|
refreshPeerInfo();
|
|
}, nullptr, duration_seconds);
|
|
}
|
|
|
|
void App::unbanPeer(const std::string& ip)
|
|
{
|
|
if (!state_.connected || !rpc_) return;
|
|
|
|
rpc_->setBan(ip, "remove", [this](const json&) {
|
|
refreshPeerInfo();
|
|
});
|
|
}
|
|
|
|
void App::clearBans()
|
|
{
|
|
if (!state_.connected || !rpc_) return;
|
|
|
|
rpc_->clearBanned([this](const json&) {
|
|
state_.banned_peers.clear();
|
|
});
|
|
}
|
|
|
|
void App::applyDefaultBanlist()
|
|
{
|
|
if (!state_.connected || !rpc_ || !worker_) return;
|
|
|
|
// Parse the embedded default_banlist.txt (compiled from res/default_banlist.txt)
|
|
std::string data(reinterpret_cast<const char*>(embedded::default_banlist_data),
|
|
embedded::default_banlist_size);
|
|
|
|
std::vector<std::string> ips;
|
|
size_t pos = 0;
|
|
while (pos < data.size()) {
|
|
size_t eol = data.find('\n', pos);
|
|
if (eol == std::string::npos) eol = data.size();
|
|
std::string line = data.substr(pos, eol - pos);
|
|
pos = eol + 1;
|
|
|
|
// Strip carriage return (Windows line endings)
|
|
if (!line.empty() && line.back() == '\r') line.pop_back();
|
|
// Strip leading/trailing whitespace
|
|
size_t start = line.find_first_not_of(" \t");
|
|
if (start == std::string::npos) continue;
|
|
line = line.substr(start, line.find_last_not_of(" \t") - start + 1);
|
|
// Skip empty lines and comments
|
|
if (line.empty() || line[0] == '#') continue;
|
|
|
|
ips.push_back(line);
|
|
}
|
|
|
|
if (ips.empty()) return;
|
|
|
|
// Apply bans on the worker thread to avoid blocking the UI
|
|
worker_->post([this, ips]() -> rpc::RPCWorker::MainCb {
|
|
int applied = 0;
|
|
for (const auto& ip : ips) {
|
|
try {
|
|
// 0 = permanent ban (until node restart or manual unban)
|
|
// Using a very long duration (10 years) for effectively permanent bans
|
|
rpc_->call("setban", {ip, "add", 315360000});
|
|
applied++;
|
|
} catch (...) {
|
|
// Already banned or invalid — skip silently
|
|
}
|
|
}
|
|
return [applied]() {
|
|
if (applied > 0) {
|
|
DEBUG_LOGF("[Banlist] Applied %d default bans\n", applied);
|
|
}
|
|
};
|
|
});
|
|
}
|
|
|
|
// ============================================================================
|
|
// Address Operations
|
|
// ============================================================================
|
|
|
|
void App::createNewZAddress(std::function<void(const std::string&)> callback)
|
|
{
|
|
if (!state_.connected || !rpc_ || !worker_) return;
|
|
|
|
worker_->post([this, callback]() -> rpc::RPCWorker::MainCb {
|
|
std::string addr;
|
|
try {
|
|
json result = rpc_->call("z_getnewaddress");
|
|
addr = result.get<std::string>();
|
|
} catch (const std::exception& e) {
|
|
DEBUG_LOGF("z_getnewaddress error: %s\n", e.what());
|
|
}
|
|
return [this, callback, addr]() {
|
|
if (!addr.empty()) {
|
|
// Inject immediately so UI can select the address next frame
|
|
AddressInfo info;
|
|
info.address = addr;
|
|
info.type = "shielded";
|
|
info.balance = 0.0;
|
|
state_.z_addresses.push_back(info);
|
|
address_list_dirty_ = true;
|
|
// Also trigger full refresh to get proper balances
|
|
addresses_dirty_ = true;
|
|
refreshAddresses();
|
|
}
|
|
if (callback) callback(addr);
|
|
};
|
|
});
|
|
}
|
|
|
|
void App::createNewTAddress(std::function<void(const std::string&)> callback)
|
|
{
|
|
if (!state_.connected || !rpc_ || !worker_) return;
|
|
|
|
worker_->post([this, callback]() -> rpc::RPCWorker::MainCb {
|
|
std::string addr;
|
|
try {
|
|
json result = rpc_->call("getnewaddress");
|
|
addr = result.get<std::string>();
|
|
} catch (const std::exception& e) {
|
|
DEBUG_LOGF("getnewaddress error: %s\n", e.what());
|
|
}
|
|
return [this, callback, addr]() {
|
|
if (!addr.empty()) {
|
|
// Inject immediately so UI can select the address next frame
|
|
AddressInfo info;
|
|
info.address = addr;
|
|
info.type = "transparent";
|
|
info.balance = 0.0;
|
|
state_.t_addresses.push_back(info);
|
|
address_list_dirty_ = true;
|
|
// Also trigger full refresh to get proper balances
|
|
addresses_dirty_ = true;
|
|
refreshAddresses();
|
|
}
|
|
if (callback) callback(addr);
|
|
};
|
|
});
|
|
}
|
|
|
|
void App::hideAddress(const std::string& addr)
|
|
{
|
|
if (settings_) {
|
|
settings_->hideAddress(addr);
|
|
settings_->save();
|
|
}
|
|
}
|
|
|
|
void App::unhideAddress(const std::string& addr)
|
|
{
|
|
if (settings_) {
|
|
settings_->unhideAddress(addr);
|
|
settings_->save();
|
|
}
|
|
}
|
|
|
|
bool App::isAddressHidden(const std::string& addr) const
|
|
{
|
|
return settings_ && settings_->isAddressHidden(addr);
|
|
}
|
|
|
|
int App::getHiddenAddressCount() const
|
|
{
|
|
return settings_ ? settings_->getHiddenAddressCount() : 0;
|
|
}
|
|
|
|
void App::favoriteAddress(const std::string& addr)
|
|
{
|
|
if (settings_) {
|
|
settings_->favoriteAddress(addr);
|
|
settings_->save();
|
|
}
|
|
}
|
|
|
|
void App::unfavoriteAddress(const std::string& addr)
|
|
{
|
|
if (settings_) {
|
|
settings_->unfavoriteAddress(addr);
|
|
settings_->save();
|
|
}
|
|
}
|
|
|
|
bool App::isAddressFavorite(const std::string& addr) const
|
|
{
|
|
return settings_ && settings_->isAddressFavorite(addr);
|
|
}
|
|
|
|
// ============================================================================
|
|
// Key Export/Import Operations
|
|
// ============================================================================
|
|
|
|
void App::exportPrivateKey(const std::string& address, std::function<void(const std::string&)> callback)
|
|
{
|
|
if (!state_.connected || !rpc_) {
|
|
if (callback) callback("");
|
|
return;
|
|
}
|
|
|
|
// Check if it's a z-address or t-address
|
|
if (address.length() > 0 && address[0] == 'z') {
|
|
// Z-address: use z_exportkey
|
|
rpc_->z_exportKey(address, [callback](const json& result) {
|
|
if (callback) callback(result.get<std::string>());
|
|
}, [callback](const std::string& error) {
|
|
DEBUG_LOGF("Export z-key error: %s\n", error.c_str());
|
|
ui::Notifications::instance().error("Key export failed: " + error);
|
|
if (callback) callback("");
|
|
});
|
|
} else {
|
|
// T-address: use dumpprivkey
|
|
rpc_->dumpPrivKey(address, [callback](const json& result) {
|
|
if (callback) callback(result.get<std::string>());
|
|
}, [callback](const std::string& error) {
|
|
DEBUG_LOGF("Export t-key error: %s\n", error.c_str());
|
|
ui::Notifications::instance().error("Key export failed: " + error);
|
|
if (callback) callback("");
|
|
});
|
|
}
|
|
}
|
|
|
|
/// Export every known t- and z-address private key into one text blob
/// (comment header + "# address" / key pairs) and deliver it via `callback`
/// once the last per-address export completes.
/// NOTE(review): the shared counters below are unsynchronized — assumes all
/// exportPrivateKey callbacks fire on the main thread; confirm.
void App::exportAllKeys(std::function<void(const std::string&)> callback)
{
    if (!state_.connected || !rpc_) {
        if (callback) callback("");
        return;
    }

    // Collect all keys into a string
    // shared_ptr state lets the per-address callbacks accumulate results
    // and detect when the last one has finished.
    auto keys_result = std::make_shared<std::string>();
    auto pending = std::make_shared<int>(0);
    auto total = std::make_shared<int>(0);

    // First get all addresses
    auto all_addresses = std::make_shared<std::vector<std::string>>();

    // Add t-addresses
    for (const auto& addr : state_.t_addresses) {
        all_addresses->push_back(addr.address);
    }
    // Add z-addresses
    for (const auto& addr : state_.z_addresses) {
        all_addresses->push_back(addr.address);
    }

    *total = all_addresses->size();
    *pending = *total;

    if (*total == 0) {
        if (callback) callback("# No addresses to export\n");
        return;
    }

    *keys_result = "# DragonX Wallet Private Keys Export\n";
    *keys_result += "# WARNING: Keep this file secure! Anyone with these keys can spend your coins!\n\n";

    for (const auto& addr : *all_addresses) {
        exportPrivateKey(addr, [keys_result, pending, total, callback, addr](const std::string& key) {
            // Failed exports (empty key) are silently omitted from the file.
            if (!key.empty()) {
                *keys_result += "# " + addr + "\n";
                *keys_result += key + "\n\n";
            }
            (*pending)--;
            // Last callback delivers the accumulated blob.
            if (*pending == 0 && callback) {
                callback(*keys_result);
            }
        });
    }
}
|
|
|
|
void App::importPrivateKey(const std::string& key, std::function<void(bool, const std::string&)> callback)
|
|
{
|
|
if (!state_.connected || !rpc_) {
|
|
if (callback) callback(false, "Not connected");
|
|
return;
|
|
}
|
|
|
|
// Detect key type based on prefix
|
|
bool is_zkey = (key.length() > 0 && key[0] == 's'); // z-address keys start with 'secret-extended-key'
|
|
|
|
if (is_zkey) {
|
|
rpc_->z_importKey(key, true, [this, callback](const json& result) {
|
|
refreshAddresses();
|
|
if (callback) callback(true, "Z-address key imported successfully. Wallet is rescanning.");
|
|
}, [callback](const std::string& error) {
|
|
if (callback) callback(false, error);
|
|
});
|
|
} else {
|
|
rpc_->importPrivKey(key, true, [this, callback](const json& result) {
|
|
refreshAddresses();
|
|
if (callback) callback(true, "T-address key imported successfully. Wallet is rescanning.");
|
|
}, [callback](const std::string& error) {
|
|
if (callback) callback(false, error);
|
|
});
|
|
}
|
|
}
|
|
|
|
void App::backupWallet(const std::string& destination, std::function<void(bool, const std::string&)> callback)
|
|
{
|
|
if (!state_.connected || !rpc_) {
|
|
if (callback) callback(false, "Not connected");
|
|
return;
|
|
}
|
|
|
|
// Use z_exportwallet or similar to export all keys
|
|
// For now, we'll use exportAllKeys and save to file
|
|
exportAllKeys([destination, callback](const std::string& keys) {
|
|
if (keys.empty()) {
|
|
if (callback) callback(false, "Failed to export keys");
|
|
return;
|
|
}
|
|
|
|
// Write to file
|
|
std::ofstream file(destination);
|
|
if (!file.is_open()) {
|
|
if (callback) callback(false, "Could not open file: " + destination);
|
|
return;
|
|
}
|
|
|
|
file << keys;
|
|
file.close();
|
|
|
|
if (callback) callback(true, "Wallet backup saved to: " + destination);
|
|
});
|
|
}
|
|
|
|
// ============================================================================
|
|
// Transaction Operations
|
|
// ============================================================================
|
|
|
|
void App::sendTransaction(const std::string& from, const std::string& to,
|
|
double amount, double fee, const std::string& memo,
|
|
std::function<void(bool success, const std::string& result)> callback)
|
|
{
|
|
if (!state_.connected || !rpc_) {
|
|
if (callback) callback(false, "Not connected");
|
|
return;
|
|
}
|
|
|
|
// Build recipients array
|
|
nlohmann::json recipients = nlohmann::json::array();
|
|
nlohmann::json recipient;
|
|
recipient["address"] = to;
|
|
// Format amount to exactly 8 decimal places (satoshi precision).
|
|
// Sending a raw double can produce 15+ decimal digits which the
|
|
// daemon's ParseFixedPoint rejects with "Invalid amount".
|
|
char amt_buf[32];
|
|
snprintf(amt_buf, sizeof(amt_buf), "%.8f", amount);
|
|
recipient["amount"] = std::string(amt_buf);
|
|
if (!memo.empty()) {
|
|
recipient["memo"] = memo;
|
|
}
|
|
recipients.push_back(recipient);
|
|
|
|
// Run z_sendmany on worker thread to avoid blocking UI
|
|
if (worker_) {
|
|
worker_->post([this, from, recipients, callback]() -> rpc::RPCWorker::MainCb {
|
|
bool ok = false;
|
|
std::string result_str;
|
|
try {
|
|
auto result = rpc_->call("z_sendmany", {from, recipients});
|
|
result_str = result.get<std::string>();
|
|
ok = true;
|
|
} catch (const std::exception& e) {
|
|
result_str = e.what();
|
|
}
|
|
return [this, callback, ok, result_str]() {
|
|
if (ok) {
|
|
// A send changes address balances — refresh on next cycle
|
|
addresses_dirty_ = true;
|
|
// Force transaction list refresh so the sent tx appears immediately
|
|
transactions_dirty_ = true;
|
|
last_tx_block_height_ = -1;
|
|
// Track the opid so we can poll for completion
|
|
if (!result_str.empty()) {
|
|
pending_opids_.push_back(result_str);
|
|
}
|
|
}
|
|
if (callback) callback(ok, result_str);
|
|
};
|
|
});
|
|
}
|
|
}
|
|
|
|
} // namespace dragonx
|